Browse Source

Merge tag 'staging-4.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging

Pull staging/IIO updates from Greg KH:
 "Here is the big set of Staging/IIO driver patches for 4.17-rc1.

  It is a lot, over 500 changes, but not huge by previous kernel release
  standards. We deleted more lines than we added again (27k added vs.
  91k removed), thanks to finally being able to delete the IRDA drivers
  and networking code.

  We also deleted the ccree crypto driver, but that's coming back in
  through the crypto tree to you, in a much cleaned-up form.

  Added this round is a lot of "mt7621" device support, which is for an
  embedded device that Neil Brown cares about, and of course a handful
  of new IIO drivers as well.

  And finally, the fsl-mc core code moved out of the staging tree to the
  "real" part of the kernel, which is nice to see happen as well.

  Full details are in the shortlog, which has all of the tiny cleanup
  patches described.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'staging-4.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging: (579 commits)
  staging: rtl8723bs: Remove yield call, replace with cond_resched()
  staging: rtl8723bs: Replace yield() call with cond_resched()
  staging: rtl8723bs: Remove unecessary newlines from 'odm.h'.
  staging: rtl8723bs: Rework 'struct _ODM_Phy_Status_Info_' coding style.
  staging: rtl8723bs: Rework 'struct _ODM_Per_Pkt_Info_' coding style.
  staging: rtl8723bs: Replace NULL pointer comparison with '!'.
  staging: rtl8723bs: Factor out rtl8723bs_recv_tasklet() sections.
  staging: rtl8723bs: Fix function signature that goes over 80 characters.
  staging: rtl8723bs: Fix lines too long in update_recvframe_attrib().
  staging: rtl8723bs: Remove unnecessary blank lines in 'rtl8723bs_recv.c'.
  staging: rtl8723bs: Change camel case to snake case in 'rtl8723bs_recv.c'.
  staging: rtl8723bs: Add missing braces in else statement.
  staging: rtl8723bs: Add spaces around ternary operators.
  staging: rtl8723bs: Fix lines with trailing open parentheses.
  staging: rtl8723bs: Remove unnecessary length #define's.
  staging: rtl8723bs: Fix IEEE80211 authentication algorithm constants.
  staging: rtl8723bs: Fix alignment in rtw_wx_set_auth().
  staging: rtl8723bs: Remove braces from single statement conditionals.
  staging: rtl8723bs: Remove unecessary braces from switch statement.
  staging: rtl8723bs: Fix newlines in rtw_wx_set_auth().
  ...
Linus Torvalds 7 năm trước cách đây
mục cha
commit
df34df483a
100 tập tin đã thay đổi với 2833 bổ sung5363 xóa
  1. 1 1
      Documentation/ABI/testing/sysfs-bus-iio-chemical-vz89x
  2. 2 2
      Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
  3. 48 0
      Documentation/devicetree/bindings/iio/adc/axp20x_adc.txt
  4. 6 2
      Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt
  5. 27 0
      Documentation/devicetree/bindings/iio/potentiometer/ad5272.txt
  6. 28 0
      Documentation/devicetree/bindings/iio/temperature/mlx90632.txt
  7. 16 0
      Documentation/devicetree/bindings/soc/bcm/brcm,bcm2835-vchiq.txt
  8. 8 0
      Documentation/networking/dpaa2/index.rst
  9. 0 0
      Documentation/networking/dpaa2/overview.rst
  10. 1 0
      Documentation/networking/index.rst
  11. 0 10
      Documentation/networking/irda.txt
  12. 17 2
      MAINTAINERS
  13. 6 0
      arch/arm/boot/dts/bcm2835-rpi.dtsi
  14. 2 0
      drivers/bus/Kconfig
  15. 4 0
      drivers/bus/Makefile
  16. 16 0
      drivers/bus/fsl-mc/Kconfig
  17. 18 0
      drivers/bus/fsl-mc/Makefile
  18. 9 76
      drivers/bus/fsl-mc/dpbp.c
  19. 10 10
      drivers/bus/fsl-mc/dpcon.c
  20. 4 4
      drivers/bus/fsl-mc/dpmcp.c
  21. 1 1
      drivers/bus/fsl-mc/dprc-driver.c
  22. 16 15
      drivers/bus/fsl-mc/dprc.c
  23. 6 1
      drivers/bus/fsl-mc/fsl-mc-allocator.c
  24. 1 1
      drivers/bus/fsl-mc/fsl-mc-bus.c
  25. 1 0
      drivers/bus/fsl-mc/fsl-mc-msi.c
  26. 90 1
      drivers/bus/fsl-mc/fsl-mc-private.h
  27. 1 1
      drivers/bus/fsl-mc/mc-io.c
  28. 11 11
      drivers/bus/fsl-mc/mc-sys.c
  29. 1 5
      drivers/iio/accel/bmc150-accel-core.c
  30. 1 1
      drivers/iio/accel/hid-sensor-accel-3d.c
  31. 1 2
      drivers/iio/accel/st_accel_i2c.c
  32. 1 2
      drivers/iio/adc/Kconfig
  33. 23 3
      drivers/iio/adc/ad7476.c
  34. 157 11
      drivers/iio/adc/axp20x_adc.c
  35. 0 4
      drivers/iio/adc/ep93xx_adc.c
  36. 4 12
      drivers/iio/adc/ti-adc161s626.c
  37. 4 13
      drivers/iio/chemical/ams-iaq-core.c
  38. 4 12
      drivers/iio/chemical/atlas-ph-sensor.c
  39. 5 5
      drivers/iio/chemical/ccs811.c
  40. 4 13
      drivers/iio/chemical/vz89x.c
  41. 1 0
      drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
  42. 49 0
      drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
  43. 2 0
      drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h
  44. 1 1
      drivers/iio/dac/ad5380.c
  45. 1 1
      drivers/iio/dac/ad5764.c
  46. 17 10
      drivers/iio/dummy/Kconfig
  47. 1 1
      drivers/iio/gyro/hid-sensor-gyro-3d.c
  48. 4 12
      drivers/iio/health/max30100.c
  49. 2 0
      drivers/iio/humidity/Kconfig
  50. 1 1
      drivers/iio/humidity/dht11.c
  51. 4 12
      drivers/iio/humidity/hdc100x.c
  52. 2 19
      drivers/iio/humidity/hts221.h
  53. 20 19
      drivers/iio/humidity/hts221_buffer.c
  54. 52 80
      drivers/iio/humidity/hts221_core.c
  55. 18 46
      drivers/iio/humidity/hts221_i2c.c
  56. 19 62
      drivers/iio/humidity/hts221_spi.c
  57. 22 7
      drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
  58. 102 59
      drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
  59. 100 4
      drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
  60. 10 0
      drivers/iio/light/Kconfig
  61. 1 0
      drivers/iio/light/Makefile
  62. 4 12
      drivers/iio/light/apds9960.c
  63. 1 0
      drivers/iio/light/cros_ec_light_prox.c
  64. 1 1
      drivers/iio/light/hid-sensor-als.c
  65. 1 1
      drivers/iio/light/lm3533-als.c
  66. 531 0
      drivers/iio/light/lv0104cs.c
  67. 1 1
      drivers/iio/magnetometer/hid-sensor-magn-3d.c
  68. 21 0
      drivers/iio/potentiometer/Kconfig
  69. 2 0
      drivers/iio/potentiometer/Makefile
  70. 231 0
      drivers/iio/potentiometer/ad5272.c
  71. 1 1
      drivers/iio/potentiometer/ds1803.c
  72. 194 0
      drivers/iio/potentiometer/mcp4018.c
  73. 4 12
      drivers/iio/potentiometer/tpl0102.c
  74. 4 12
      drivers/iio/potentiostat/lmp91000.c
  75. 1 1
      drivers/iio/pressure/ms5611.h
  76. 4 13
      drivers/iio/proximity/as3935.c
  77. 4 12
      drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
  78. 20 5
      drivers/iio/proximity/sx9500.c
  79. 12 0
      drivers/iio/temperature/Kconfig
  80. 1 0
      drivers/iio/temperature/Makefile
  81. 4 12
      drivers/iio/temperature/maxim_thermocouple.c
  82. 752 0
      drivers/iio/temperature/mlx90632.c
  83. 6 0
      drivers/irqchip/Kconfig
  84. 1 0
      drivers/irqchip/Makefile
  85. 1 3
      drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
  86. 14 4
      drivers/staging/Kconfig
  87. 8 3
      drivers/staging/Makefile
  88. 1 1
      drivers/staging/android/ashmem.c
  89. 1 1
      drivers/staging/android/ion/Kconfig
  90. 3 23
      drivers/staging/android/ion/ion.c
  91. 1 21
      drivers/staging/android/ion/ion.h
  92. 5 28
      drivers/staging/android/ion/ion_page_pool.c
  93. 14 62
      drivers/staging/android/ion/ion_system_heap.c
  94. 0 27
      drivers/staging/ccree/Kconfig
  95. 0 7
      drivers/staging/ccree/Makefile
  96. 0 10
      drivers/staging/ccree/TODO
  97. 0 2701
      drivers/staging/ccree/cc_aead.c
  98. 0 109
      drivers/staging/ccree/cc_aead.h
  99. 0 1651
      drivers/staging/ccree/cc_buffer_mgr.c
  100. 0 74
      drivers/staging/ccree/cc_buffer_mgr.h

+ 1 - 1
Documentation/ABI/testing/sysfs-bus-iio-chemical-vz89x

@@ -1,7 +1,7 @@
 What:		/sys/bus/iio/devices/iio:deviceX/in_concentration_VOC_short_raw
 Date:		September 2015
 KernelVersion:	4.3
-Contact:	Matt Ranostay <mranostay@gmail.com>
+Contact:	Matt Ranostay <matt.ranostay@konsulko.com>
 Description:
 		Get the raw calibration VOC value from the sensor.
 		This value has little application outside of calibration.

+ 2 - 2
Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935

@@ -1,7 +1,7 @@
 What		/sys/bus/iio/devices/iio:deviceX/in_proximity_input
 Date:		March 2014
 KernelVersion:	3.15
-Contact:	Matt Ranostay <mranostay@gmail.com>
+Contact:	Matt Ranostay <matt.ranostay@konsulko.com>
 Description:
 		Get the current distance in meters of storm (1km steps)
 		1000-40000 = distance in meters
@@ -9,7 +9,7 @@ Description:
 What		/sys/bus/iio/devices/iio:deviceX/sensor_sensitivity
 Date:		March 2014
 KernelVersion:	3.15
-Contact:	Matt Ranostay <mranostay@gmail.com>
+Contact:	Matt Ranostay <matt.ranostay@konsulko.com>
 Description:
 		Show or set the gain boost of the amp, from 0-31 range.
 		18 = indoors (default)

+ 48 - 0
Documentation/devicetree/bindings/iio/adc/axp20x_adc.txt

@@ -0,0 +1,48 @@
+* X-Powers AXP ADC bindings
+
+Required properties:
+  - compatible: should be one of:
+    - "x-powers,axp209-adc",
+    - "x-powers,axp221-adc",
+    - "x-powers,axp813-adc",
+  - #io-channel-cells: should be 1,
+
+Example:
+
+&axp22x {
+	adc {
+		compatible = "x-powers,axp221-adc";
+		#io-channel-cells = <1>;
+	};
+};
+
+ADC channels and their indexes per variant:
+
+AXP209
+------
+ 0 | acin_v
+ 1 | acin_i
+ 2 | vbus_v
+ 3 | vbus_i
+ 4 | pmic_temp
+ 5 | gpio0_v
+ 6 | gpio1_v
+ 7 | ipsout_v
+ 8 | batt_v
+ 9 | batt_chrg_i
+10 | batt_dischrg_i
+
+AXP22x
+------
+ 0 | pmic_temp
+ 1 | batt_v
+ 2 | batt_chrg_i
+ 3 | batt_dischrg_i
+
+AXP813
+------
+ 0 | pmic_temp
+ 1 | gpio0_v
+ 2 | batt_v
+ 3 | batt_chrg_i
+ 4 | batt_dischrg_i

+ 6 - 2
Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt

@@ -32,6 +32,10 @@ Optional properties:
 		  to "clock" property. Frequency must be a multiple of the rcc
 		  clock frequency. If not, SPI CLKOUT frequency will not be
 		  accurate.
+- pinctrl-names:	Set to "default".
+- pinctrl-0:		List of phandles pointing to pin configuration
+			nodes to set pins in mode of operation for dfsdm
+			on external pin.
 
 Contents of a STM32 DFSDM child nodes:
 --------------------------------------
@@ -68,8 +72,8 @@ Optional properties:
 - st,adc-channel-types:	Single-ended channel input type.
 			- "SPI_R": SPI with data on rising edge (default)
 			- "SPI_F": SPI with data on falling edge
-			- "MANCH_R": manchester codec, rising edge = logic 0
-			- "MANCH_F": manchester codec, falling edge = logic 1
+			- "MANCH_R": manchester codec, rising edge = logic 0, falling edge = logic 1
+			- "MANCH_F": manchester codec, rising edge = logic 1, falling edge = logic 0
 - st,adc-channel-clk-src: Conversion clock source.
 			  - "CLKIN": external SPI clock (CLKIN x)
 			  - "CLKOUT": internal SPI clock (CLKOUT) (default)

+ 27 - 0
Documentation/devicetree/bindings/iio/potentiometer/ad5272.txt

@@ -0,0 +1,27 @@
+* Analog Devices AD5272 digital potentiometer
+
+The node for this device must be a child node of a I2C controller, hence
+all mandatory properties for your controller must be specified. See directory:
+
+        Documentation/devicetree/bindings/i2c
+
+for more details.
+
+Required properties:
+	- compatible:  	Must be one of the following, depending on the model:
+			adi,ad5272-020
+			adi,ad5272-050
+			adi,ad5272-100
+			adi,ad5274-020
+			adi,ad5274-100
+
+Optional properties:
+ - reset-gpios: GPIO specification for the RESET input. This is an
+		active low signal to the AD5272.
+
+Example:
+ad5272: potentiometer@2f {
+	reg = <0x2F>;
+	compatible = "adi,ad5272-020";
+	reset-gpios = <&gpio3 6 GPIO_ACTIVE_HIGH>;
+};

+ 28 - 0
Documentation/devicetree/bindings/iio/temperature/mlx90632.txt

@@ -0,0 +1,28 @@
+* Melexis MLX90632 contactless Infra Red temperature sensor
+
+Link to datasheet: https://www.melexis.com/en/documents/documentation/datasheets/datasheet-mlx90632
+
+There are various applications for the Infra Red contactless temperature sensor
+and MLX90632 is most suitable for consumer applications where measured object
+temperature is in range between -20 to 200 degrees Celsius with relative error
+of measurement below 1 degree Celsius in object temperature range for
+industrial applications. Since it can operate and measure ambient temperature
+in range of -20 to 85 degrees Celsius it is suitable also for outdoor use.
+
+Be aware that electronics surrounding the sensor can increase ambient
+temperature. MLX90632 can be calibrated to reduce the housing effect via
+already existing EEPROM parameters.
+
+Since measured object emissivity effects Infra Red energy emitted, emissivity
+should be set before requesting the object temperature.
+
+Required properties:
+  - compatible: should be "melexis,mlx90632"
+  - reg: the I2C address of the sensor (default 0x3a)
+
+Example:
+
+mlx90632@3a {
+	compatible = "melexis,mlx90632";
+	reg = <0x3a>;
+};

+ 16 - 0
Documentation/devicetree/bindings/soc/bcm/brcm,bcm2835-vchiq.txt

@@ -0,0 +1,16 @@
+Broadcom VCHIQ firmware services
+
+Required properties:
+
+- compatible:	Should be "brcm,bcm2835-vchiq"
+- reg:		Physical base address and length of the doorbell register pair
+- interrupts:	The interrupt number
+		  See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
+
+Example:
+
+mailbox@7e00b840 {
+	compatible = "brcm,bcm2835-vchiq";
+	reg = <0x7e00b840 0xf>;
+	interrupts = <0 2>;
+};

+ 8 - 0
Documentation/networking/dpaa2/index.rst

@@ -0,0 +1,8 @@
+===================
+DPAA2 Documentation
+===================
+
+.. toctree::
+   :maxdepth: 1
+
+   overview

+ 0 - 0
drivers/staging/fsl-mc/overview.rst → Documentation/networking/dpaa2/overview.rst


+ 1 - 0
Documentation/networking/index.rst

@@ -8,6 +8,7 @@ Contents:
 
    batman-adv
    can
+   dpaa2/index
    kapi
    z8530book
    msg_zerocopy

+ 0 - 10
Documentation/networking/irda.txt

@@ -1,10 +0,0 @@
-To use the IrDA protocols within Linux you will need to get a suitable copy
-of the IrDA Utilities. More detailed information about these and associated
-programs can be found on http://irda.sourceforge.net/
-
-For more information about how to use the IrDA protocol stack, see the
-Linux Infrared HOWTO by Werner Heuser <wehe@tuxmobil.org>:
-<http://www.tuxmobil.org/Infrared-HOWTO/Infrared-HOWTO.html>
-
-There is an active mailing list for discussing Linux-IrDA matters called
-    irda-users@lists.sourceforge.net

+ 17 - 2
MAINTAINERS

@@ -4370,6 +4370,12 @@ L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	drivers/staging/fsl-dpaa2/ethernet
 
+DPAA2 ETHERNET SWITCH DRIVER
+M:	Razvan Stefanescu <razvan.stefanescu@nxp.com>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+F:	drivers/staging/fsl-dpaa2/ethsw
+
 DPT_I2O SCSI RAID DRIVER
 M:	Adaptec OEM Raid Solutions <aacraid@adaptec.com>
 L:	linux-scsi@vger.kernel.org
@@ -8587,11 +8593,12 @@ W:	https://linuxtv.org
 S:	Maintained
 F:	drivers/media/radio/radio-maxiradio*
 
-MCP4531 MICROCHIP DIGITAL POTENTIOMETER DRIVER
+MCP4018 AND MCP4531 MICROCHIP DIGITAL POTENTIOMETER DRIVERS
 M:	Peter Rosin <peda@axentia.se>
 L:	linux-iio@vger.kernel.org
 S:	Maintained
 F:	Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
+F:	drivers/iio/potentiometer/mcp4018.c
 F:	drivers/iio/potentiometer/mcp4531.c
 
 MCR20A IEEE-802.15.4 RADIO DRIVER
@@ -8915,6 +8922,13 @@ W:	http://www.melexis.com
 S:	Supported
 F:	drivers/iio/temperature/mlx90614.c
 
+MELEXIS MLX90632 DRIVER
+M:	Crt Mori <cmo@melexis.com>
+L:	linux-iio@vger.kernel.org
+W:	http://www.melexis.com
+S:	Supported
+F:	drivers/iio/temperature/mlx90632.c
+
 MELFAS MIP4 TOUCHSCREEN DRIVER
 M:	Sangwon Jee <jeesw@melfas.com>
 W:	http://www.melfas.com
@@ -11529,8 +11543,9 @@ M:	Stuart Yoder <stuyoder@gmail.com>
 M:	Laurentiu Tudor <laurentiu.tudor@nxp.com>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
-F:	drivers/staging/fsl-mc/
+F:	drivers/bus/fsl-mc/
 F:	Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
+F:	Documentation/networking/dpaa2/overview.rst
 
 QT1010 MEDIA DRIVER
 M:	Antti Palosaari <crope@iki.fi>

+ 6 - 0
arch/arm/boot/dts/bcm2835-rpi.dtsi

@@ -27,6 +27,12 @@
 			firmware = <&firmware>;
 			#power-domain-cells = <1>;
 		};
+
+		mailbox@7e00b840 {
+			compatible = "brcm,bcm2835-vchiq";
+			reg = <0x7e00b840 0xf>;
+			interrupts = <0 2>;
+		};
 	};
 };
 

+ 2 - 0
drivers/bus/Kconfig

@@ -199,4 +199,6 @@ config DA8XX_MSTPRI
 	  configuration. Allows to adjust the priorities of all master
 	  peripherals.
 
+source "drivers/bus/fsl-mc/Kconfig"
+
 endmenu

+ 4 - 0
drivers/bus/Makefile

@@ -8,6 +8,10 @@ obj-$(CONFIG_ARM_CCI)		+= arm-cci.o
 obj-$(CONFIG_ARM_CCN)		+= arm-ccn.o
 
 obj-$(CONFIG_BRCMSTB_GISB_ARB)	+= brcmstb_gisb.o
+
+# DPAA2 fsl-mc bus
+obj-$(CONFIG_FSL_MC_BUS)	+= fsl-mc/
+
 obj-$(CONFIG_IMX_WEIM)		+= imx-weim.o
 obj-$(CONFIG_MIPS_CDMM)		+= mips_cdmm.o
 obj-$(CONFIG_MVEBU_MBUS) 	+= mvebu-mbus.o

+ 16 - 0
drivers/bus/fsl-mc/Kconfig

@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# DPAA2 fsl-mc bus
+#
+# Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+#
+
+config FSL_MC_BUS
+	bool "QorIQ DPAA2 fsl-mc bus driver"
+	depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86_LOCAL_APIC || PPC)))
+	select GENERIC_MSI_IRQ_DOMAIN
+	help
+	  Driver to enable the bus infrastructure for the QorIQ DPAA2
+	  architecture.  The fsl-mc bus driver handles discovery of
+	  DPAA2 objects (which are represented as Linux devices) and
+	  binding objects to drivers.

+ 18 - 0
drivers/bus/fsl-mc/Makefile

@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Freescale Management Complex (MC) bus drivers
+#
+# Copyright (C) 2014 Freescale Semiconductor, Inc.
+#
+obj-$(CONFIG_FSL_MC_BUS) += mc-bus-driver.o
+
+mc-bus-driver-objs := fsl-mc-bus.o \
+		      mc-sys.o \
+		      mc-io.o \
+		      dpbp.o \
+		      dpcon.o \
+		      dprc.o \
+		      dprc-driver.o \
+		      fsl-mc-allocator.o \
+		      fsl-mc-msi.o \
+		      dpmcp.o

+ 9 - 76
drivers/staging/fsl-mc/bus/dpbp.c → drivers/bus/fsl-mc/dpbp.c

@@ -4,10 +4,10 @@
  *
  */
 #include <linux/kernel.h>
-#include "../include/mc.h"
-#include "../include/dpbp.h"
+#include <linux/fsl/mc.h>
+#include <linux/fsl/mc.h>
 
-#include "dpbp-cmd.h"
+#include "fsl-mc-private.h"
 
 /**
  * dpbp_open() - Open a control session for the specified object.
@@ -31,7 +31,7 @@ int dpbp_open(struct fsl_mc_io *mc_io,
 	      int dpbp_id,
 	      u16 *token)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpbp_cmd_open *cmd_params;
 	int err;
 
@@ -68,7 +68,7 @@ int dpbp_close(struct fsl_mc_io *mc_io,
 	       u32 cmd_flags,
 	       u16 token)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 
 	/* prepare command */
 	cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags,
@@ -91,7 +91,7 @@ int dpbp_enable(struct fsl_mc_io *mc_io,
 		u32 cmd_flags,
 		u16 token)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 
 	/* prepare command */
 	cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags,
@@ -114,7 +114,7 @@ int dpbp_disable(struct fsl_mc_io *mc_io,
 		 u32 cmd_flags,
 		 u16 token)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 
 	/* prepare command */
 	cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE,
@@ -125,40 +125,6 @@ int dpbp_disable(struct fsl_mc_io *mc_io,
 }
 EXPORT_SYMBOL_GPL(dpbp_disable);
 
-/**
- * dpbp_is_enabled() - Check if the DPBP is enabled.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPBP object
- * @en:		Returns '1' if object is enabled; '0' otherwise
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpbp_is_enabled(struct fsl_mc_io *mc_io,
-		    u32 cmd_flags,
-		    u16 token,
-		    int *en)
-{
-	struct mc_command cmd = { 0 };
-	struct dpbp_rsp_is_enabled *rsp_params;
-	int err;
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags,
-					  token);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpbp_rsp_is_enabled *)cmd.params;
-	*en = rsp_params->enabled & DPBP_ENABLE;
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(dpbp_is_enabled);
-
 /**
  * dpbp_reset() - Reset the DPBP, returns the object to initial state.
  * @mc_io:	Pointer to MC portal's I/O object
@@ -171,7 +137,7 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
 	       u32 cmd_flags,
 	       u16 token)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 
 	/* prepare command */
 	cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET,
@@ -197,7 +163,7 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
 			u16 token,
 			struct dpbp_attr *attr)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpbp_rsp_get_attributes *rsp_params;
 	int err;
 
@@ -218,36 +184,3 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dpbp_get_attributes);
-
-/**
- * dpbp_get_api_version - Get Data Path Buffer Pool API version
- * @mc_io:	Pointer to Mc portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver:	Major version of Buffer Pool API
- * @minor_ver:	Minor version of Buffer Pool API
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpbp_get_api_version(struct fsl_mc_io *mc_io,
-			 u32 cmd_flags,
-			 u16 *major_ver,
-			 u16 *minor_ver)
-{
-	struct mc_command cmd = { 0 };
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_API_VERSION,
-					  cmd_flags, 0);
-
-	/* send command to mc */
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(dpbp_get_api_version);

+ 10 - 10
drivers/staging/fsl-mc/bus/dpcon.c → drivers/bus/fsl-mc/dpcon.c

@@ -4,10 +4,10 @@
  *
  */
 #include <linux/kernel.h>
-#include "../include/mc.h"
-#include "../include/dpcon.h"
+#include <linux/fsl/mc.h>
+#include <linux/fsl/mc.h>
 
-#include "dpcon-cmd.h"
+#include "fsl-mc-private.h"
 
 /**
  * dpcon_open() - Open a control session for the specified object
@@ -31,7 +31,7 @@ int dpcon_open(struct fsl_mc_io *mc_io,
 	       int dpcon_id,
 	       u16 *token)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpcon_cmd_open *dpcon_cmd;
 	int err;
 
@@ -69,7 +69,7 @@ int dpcon_close(struct fsl_mc_io *mc_io,
 		u32 cmd_flags,
 		u16 token)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 
 	/* prepare command */
 	cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
@@ -93,7 +93,7 @@ int dpcon_enable(struct fsl_mc_io *mc_io,
 		 u32 cmd_flags,
 		 u16 token)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 
 	/* prepare command */
 	cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
@@ -117,7 +117,7 @@ int dpcon_disable(struct fsl_mc_io *mc_io,
 		  u32 cmd_flags,
 		  u16 token)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 
 	/* prepare command */
 	cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
@@ -141,7 +141,7 @@ int dpcon_reset(struct fsl_mc_io *mc_io,
 		u32 cmd_flags,
 		u16 token)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 
 	/* prepare command */
 	cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
@@ -166,7 +166,7 @@ int dpcon_get_attributes(struct fsl_mc_io *mc_io,
 			 u16 token,
 			 struct dpcon_attr *attr)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpcon_rsp_get_attr *dpcon_rsp;
 	int err;
 
@@ -204,7 +204,7 @@ int dpcon_set_notification(struct fsl_mc_io *mc_io,
 			   u16 token,
 			   struct dpcon_notification_cfg *cfg)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpcon_cmd_set_notification *dpcon_cmd;
 
 	/* prepare command */

+ 4 - 4
drivers/staging/fsl-mc/bus/dpmcp.c → drivers/bus/fsl-mc/dpmcp.c

@@ -4,7 +4,7 @@
  *
  */
 #include <linux/kernel.h>
-#include "../include/mc.h"
+#include <linux/fsl/mc.h>
 
 #include "fsl-mc-private.h"
 
@@ -30,7 +30,7 @@ int dpmcp_open(struct fsl_mc_io *mc_io,
 	       int dpmcp_id,
 	       u16 *token)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpmcp_cmd_open *cmd_params;
 	int err;
 
@@ -66,7 +66,7 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
 		u32 cmd_flags,
 		u16 token)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 
 	/* prepare command */
 	cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE,
@@ -88,7 +88,7 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
 		u32 cmd_flags,
 		u16 token)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 
 	/* prepare command */
 	cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET,

+ 1 - 1
drivers/staging/fsl-mc/bus/dprc-driver.c → drivers/bus/fsl-mc/dprc-driver.c

@@ -11,7 +11,7 @@
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/msi.h>
-#include "../include/mc.h"
+#include <linux/fsl/mc.h>
 
 #include "fsl-mc-private.h"
 

+ 16 - 15
drivers/staging/fsl-mc/bus/dprc.c → drivers/bus/fsl-mc/dprc.c

@@ -4,7 +4,8 @@
  *
  */
 #include <linux/kernel.h>
-#include "../include/mc.h"
+#include <linux/fsl/mc.h>
+
 #include "fsl-mc-private.h"
 
 /**
@@ -23,7 +24,7 @@ int dprc_open(struct fsl_mc_io *mc_io,
 	      int container_id,
 	      u16 *token)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dprc_cmd_open *cmd_params;
 	int err;
 
@@ -60,7 +61,7 @@ int dprc_close(struct fsl_mc_io *mc_io,
 	       u32 cmd_flags,
 	       u16 token)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 
 	/* prepare command */
 	cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags,
@@ -87,7 +88,7 @@ int dprc_set_irq(struct fsl_mc_io *mc_io,
 		 u8 irq_index,
 		 struct dprc_irq_cfg *irq_cfg)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dprc_cmd_set_irq *cmd_params;
 
 	/* prepare command */
@@ -125,7 +126,7 @@ int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
 			u8 irq_index,
 			u8 en)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dprc_cmd_set_irq_enable *cmd_params;
 
 	/* prepare command */
@@ -161,7 +162,7 @@ int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
 		      u8 irq_index,
 		      u32 mask)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dprc_cmd_set_irq_mask *cmd_params;
 
 	/* prepare command */
@@ -193,7 +194,7 @@ int dprc_get_irq_status(struct fsl_mc_io *mc_io,
 			u8 irq_index,
 			u32 *status)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dprc_cmd_get_irq_status *cmd_params;
 	struct dprc_rsp_get_irq_status *rsp_params;
 	int err;
@@ -235,7 +236,7 @@ int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
 			  u8 irq_index,
 			  u32 status)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dprc_cmd_clear_irq_status *cmd_params;
 
 	/* prepare command */
@@ -263,7 +264,7 @@ int dprc_get_attributes(struct fsl_mc_io *mc_io,
 			u16 token,
 			struct dprc_attributes *attr)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dprc_rsp_get_attributes *rsp_params;
 	int err;
 
@@ -301,7 +302,7 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
 		       u16 token,
 		       int *obj_count)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dprc_rsp_get_obj_count *rsp_params;
 	int err;
 
@@ -343,7 +344,7 @@ int dprc_get_obj(struct fsl_mc_io *mc_io,
 		 int obj_index,
 		 struct fsl_mc_obj_desc *obj_desc)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dprc_cmd_get_obj *cmd_params;
 	struct dprc_rsp_get_obj *rsp_params;
 	int err;
@@ -398,7 +399,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
 		     u8 irq_index,
 		     struct dprc_irq_cfg *irq_cfg)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dprc_cmd_set_obj_irq *cmd_params;
 
 	/* prepare command */
@@ -439,7 +440,7 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
 			u8 region_index,
 			struct dprc_region_desc *region_desc)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dprc_cmd_get_obj_region *cmd_params;
 	struct dprc_rsp_get_obj_region *rsp_params;
 	int err;
@@ -481,7 +482,7 @@ int dprc_get_api_version(struct fsl_mc_io *mc_io,
 			 u16 *major_ver,
 			 u16 *minor_ver)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	int err;
 
 	/* prepare command */
@@ -511,7 +512,7 @@ int dprc_get_container_id(struct fsl_mc_io *mc_io,
 			  u32 cmd_flags,
 			  int *container_id)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	int err;
 
 	/* prepare command */

+ 6 - 1
drivers/staging/fsl-mc/bus/fsl-mc-allocator.c → drivers/bus/fsl-mc/fsl-mc-allocator.c

@@ -8,7 +8,7 @@
 
 #include <linux/module.h>
 #include <linux/msi.h>
-#include "../include/mc.h"
+#include <linux/fsl/mc.h>
 
 #include "fsl-mc-private.h"
 
@@ -646,3 +646,8 @@ int __init fsl_mc_allocator_driver_init(void)
 {
 	return fsl_mc_driver_register(&fsl_mc_allocator_driver);
 }
+
+void fsl_mc_allocator_driver_exit(void)
+{
+	fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
+}

+ 1 - 1
drivers/staging/fsl-mc/bus/fsl-mc-bus.c → drivers/bus/fsl-mc/fsl-mc-bus.c

@@ -314,7 +314,7 @@ static int mc_get_version(struct fsl_mc_io *mc_io,
 			  u32 cmd_flags,
 			  struct mc_version *mc_ver_info)
 {
-	struct mc_command cmd = { 0 };
+	struct fsl_mc_command cmd = { 0 };
 	struct dpmng_rsp_get_version *rsp_params;
 	int err;
 

+ 1 - 0
drivers/staging/fsl-mc/bus/fsl-mc-msi.c → drivers/bus/fsl-mc/fsl-mc-msi.c

@@ -13,6 +13,7 @@
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/msi.h>
+
 #include "fsl-mc-private.h"
 
 #ifdef GENERIC_MSI_DOMAIN_OPS

+ 90 - 1
drivers/staging/fsl-mc/bus/fsl-mc-private.h → drivers/bus/fsl-mc/fsl-mc-private.h

@@ -8,7 +8,7 @@
 #ifndef _FSL_MC_PRIVATE_H_
 #define _FSL_MC_PRIVATE_H_
 
-#include "../include/mc.h"
+#include <linux/fsl/mc.h>
 #include <linux/mutex.h>
 
 /*
@@ -379,6 +379,93 @@ int dprc_get_container_id(struct fsl_mc_io *mc_io,
 			  u32 cmd_flags,
 			  int *container_id);
 
+/*
+ * Data Path Buffer Pool (DPBP) API
+ */
+
+/* DPBP Version */
+#define DPBP_VER_MAJOR				3
+#define DPBP_VER_MINOR				2
+
+/* Command versioning */
+#define DPBP_CMD_BASE_VERSION			1
+#define DPBP_CMD_ID_OFFSET			4
+
+#define DPBP_CMD(id)	(((id) << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPBP_CMDID_CLOSE		DPBP_CMD(0x800)
+#define DPBP_CMDID_OPEN			DPBP_CMD(0x804)
+
+#define DPBP_CMDID_ENABLE		DPBP_CMD(0x002)
+#define DPBP_CMDID_DISABLE		DPBP_CMD(0x003)
+#define DPBP_CMDID_GET_ATTR		DPBP_CMD(0x004)
+#define DPBP_CMDID_RESET		DPBP_CMD(0x005)
+
+struct dpbp_cmd_open {
+	__le32 dpbp_id;
+};
+
+#define DPBP_ENABLE			0x1
+
+struct dpbp_rsp_get_attributes {
+	/* response word 0 */
+	__le16 pad;
+	__le16 bpid;
+	__le32 id;
+	/* response word 1 */
+	__le16 version_major;
+	__le16 version_minor;
+};
+
+/*
+ * Data Path Concentrator (DPCON) API
+ */
+
+/* DPCON Version */
+#define DPCON_VER_MAJOR				3
+#define DPCON_VER_MINOR				2
+
+/* Command versioning */
+#define DPCON_CMD_BASE_VERSION			1
+#define DPCON_CMD_ID_OFFSET			4
+
+#define DPCON_CMD(id)	(((id) << DPCON_CMD_ID_OFFSET) | DPCON_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPCON_CMDID_CLOSE			DPCON_CMD(0x800)
+#define DPCON_CMDID_OPEN			DPCON_CMD(0x808)
+
+#define DPCON_CMDID_ENABLE			DPCON_CMD(0x002)
+#define DPCON_CMDID_DISABLE			DPCON_CMD(0x003)
+#define DPCON_CMDID_GET_ATTR			DPCON_CMD(0x004)
+#define DPCON_CMDID_RESET			DPCON_CMD(0x005)
+
+#define DPCON_CMDID_SET_NOTIFICATION		DPCON_CMD(0x100)
+
+struct dpcon_cmd_open {
+	__le32 dpcon_id;
+};
+
+#define DPCON_ENABLE			1
+
+struct dpcon_rsp_get_attr {
+	/* response word 0 */
+	__le32 id;
+	__le16 qbman_ch_id;
+	u8 num_priorities;
+	u8 pad;
+};
+
+struct dpcon_cmd_set_notification {
+	/* cmd word 0 */
+	__le32 dpio_id;
+	u8 priority;
+	u8 pad[3];
+	/* cmd word 1 */
+	__le64 user_ctx;
+};
+
 /**
  * Maximum number of total IRQs that can be pre-allocated for an MC bus'
  * IRQ pool
@@ -438,6 +525,8 @@ void dprc_driver_exit(void);
 
 int __init fsl_mc_allocator_driver_init(void);
 
+void fsl_mc_allocator_driver_exit(void);
+
 void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
 
 void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);

+ 1 - 1
drivers/staging/fsl-mc/bus/mc-io.c → drivers/bus/fsl-mc/mc-io.c

@@ -5,7 +5,7 @@
  */
 
 #include <linux/io.h>
-#include "../include/mc.h"
+#include <linux/fsl/mc.h>
 
 #include "fsl-mc-private.h"
 

+ 11 - 11
drivers/staging/fsl-mc/bus/mc-sys.c → drivers/bus/fsl-mc/mc-sys.c

@@ -12,7 +12,7 @@
 #include <linux/device.h>
 #include <linux/io.h>
 #include <linux/io-64-nonatomic-hi-lo.h>
-#include "../include/mc.h"
+#include <linux/fsl/mc.h>
 
 #include "fsl-mc-private.h"
 
@@ -28,14 +28,14 @@
 #define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS    10
 #define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS    500
 
-static enum mc_cmd_status mc_cmd_hdr_read_status(struct mc_command *cmd)
+static enum mc_cmd_status mc_cmd_hdr_read_status(struct fsl_mc_command *cmd)
 {
 	struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
 
 	return (enum mc_cmd_status)hdr->status;
 }
 
-static u16 mc_cmd_hdr_read_cmdid(struct mc_command *cmd)
+static u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd)
 {
 	struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
 	u16 cmd_id = le16_to_cpu(hdr->cmd_id);
@@ -94,8 +94,8 @@ static const char *mc_status_to_string(enum mc_cmd_status status)
  * @portal: pointer to an MC portal
  * @cmd: pointer to a filled command
  */
-static inline void mc_write_command(struct mc_command __iomem *portal,
-				    struct mc_command *cmd)
+static inline void mc_write_command(struct fsl_mc_command __iomem *portal,
+				    struct fsl_mc_command *cmd)
 {
 	int i;
 
@@ -121,9 +121,9 @@ static inline void mc_write_command(struct mc_command __iomem *portal,
  *
  * Returns MC_CMD_STATUS_OK on Success; Error code otherwise.
  */
-static inline enum mc_cmd_status mc_read_response(struct mc_command __iomem *
-						  portal,
-						  struct mc_command *resp)
+static inline enum mc_cmd_status mc_read_response(struct fsl_mc_command __iomem
+						  *portal,
+						  struct fsl_mc_command *resp)
 {
 	int i;
 	enum mc_cmd_status status;
@@ -156,7 +156,7 @@ static inline enum mc_cmd_status mc_read_response(struct mc_command __iomem *
  * @mc_status: MC command completion status
  */
 static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
-				       struct mc_command *cmd,
+				       struct fsl_mc_command *cmd,
 				       enum mc_cmd_status *mc_status)
 {
 	enum mc_cmd_status status;
@@ -202,7 +202,7 @@ static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
  * @mc_status: MC command completion status
  */
 static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
-				  struct mc_command *cmd,
+				  struct fsl_mc_command *cmd,
 				  enum mc_cmd_status *mc_status)
 {
 	enum mc_cmd_status status;
@@ -241,7 +241,7 @@ static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
  *
  * Returns '0' on Success; Error code otherwise.
  */
-int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
+int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
 {
 	int error;
 	enum mc_cmd_status status;

+ 1 - 5
drivers/iio/accel/bmc150-accel-core.c

@@ -336,8 +336,7 @@ static int bmc150_accel_update_slope(struct bmc150_accel_data *data)
 		return ret;
 	}
 
-	dev_dbg(dev, "%s: %x %x\n", __func__, data->slope_thres,
-		data->slope_dur);
+	dev_dbg(dev, "%x %x\n", data->slope_thres, data->slope_dur);
 
 	return ret;
 }
@@ -1716,7 +1715,6 @@ static int bmc150_accel_runtime_suspend(struct device *dev)
 	struct bmc150_accel_data *data = iio_priv(indio_dev);
 	int ret;
 
-	dev_dbg(dev,  __func__);
 	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
 	if (ret < 0)
 		return -EAGAIN;
@@ -1731,8 +1729,6 @@ static int bmc150_accel_runtime_resume(struct device *dev)
 	int ret;
 	int sleep_val;
 
-	dev_dbg(dev,  __func__);
-
 	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
 	if (ret < 0)
 		return ret;

+ 1 - 1
drivers/iio/accel/hid-sensor-accel-3d.c

@@ -155,7 +155,7 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
 	*val = 0;
 	*val2 = 0;
 	switch (mask) {
-	case 0:
+	case IIO_CHAN_INFO_RAW:
 		hid_sensor_power_state(&accel_state->common_attributes, true);
 		report_id = accel_state->accel[chan->scan_index].report_id;
 		address = accel_3d_addresses[chan->scan_index];

+ 1 - 2
drivers/iio/accel/st_accel_i2c.c

@@ -159,9 +159,8 @@ static int st_accel_i2c_probe(struct i2c_client *client,
 		if ((ret < 0) || (ret >= ST_ACCEL_MAX))
 			return -ENODEV;
 
-		strncpy(client->name, st_accel_id_table[ret].name,
+		strlcpy(client->name, st_accel_id_table[ret].name,
 				sizeof(client->name));
-		client->name[sizeof(client->name) - 1] = '\0';
 	} else if (!id)
 		return -ENODEV;
 

+ 1 - 2
drivers/iio/adc/Kconfig

@@ -144,10 +144,9 @@ config ASPEED_ADC
 config AT91_ADC
 	tristate "Atmel AT91 ADC"
 	depends on ARCH_AT91
-	depends on INPUT
+	depends on INPUT && SYSFS
 	select IIO_BUFFER
 	select IIO_TRIGGERED_BUFFER
-	select SYSFS
 	help
 	  Say yes here to build support for Atmel AT91 ADC.
 

+ 23 - 3
drivers/iio/adc/ad7476.c

@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * AD7466/7/8 AD7476/5/7/8 (A) SPI ADC driver
+ * Analog Devices AD7466/7/8 AD7476/5/7/8 (A) SPI ADC driver
+ * TI ADC081S/ADC101S/ADC121S 8/10/12-bit SPI ADC driver
  *
  * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
  */
 
 #include <linux/device.h>
@@ -56,6 +56,9 @@ enum ad7476_supported_device_ids {
 	ID_AD7468,
 	ID_AD7495,
 	ID_AD7940,
+	ID_ADC081S,
+	ID_ADC101S,
+	ID_ADC121S,
 };
 
 static irqreturn_t ad7476_trigger_handler(int irq, void  *p)
@@ -147,6 +150,8 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
 	},							\
 }
 
+#define ADC081S_CHAN(bits) _AD7476_CHAN((bits), 12 - (bits), \
+		BIT(IIO_CHAN_INFO_RAW))
 #define AD7476_CHAN(bits) _AD7476_CHAN((bits), 13 - (bits), \
 		BIT(IIO_CHAN_INFO_RAW))
 #define AD7940_CHAN(bits) _AD7476_CHAN((bits), 15 - (bits), \
@@ -192,6 +197,18 @@ static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
 		.channel[0] = AD7940_CHAN(14),
 		.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
 	},
+	[ID_ADC081S] = {
+		.channel[0] = ADC081S_CHAN(8),
+		.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+	},
+	[ID_ADC101S] = {
+		.channel[0] = ADC081S_CHAN(10),
+		.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+	},
+	[ID_ADC121S] = {
+		.channel[0] = ADC081S_CHAN(12),
+		.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+	},
 };
 
 static const struct iio_info ad7476_info = {
@@ -294,6 +311,9 @@ static const struct spi_device_id ad7476_id[] = {
 	{"ad7910", ID_AD7467},
 	{"ad7920", ID_AD7466},
 	{"ad7940", ID_AD7940},
+	{"adc081s", ID_ADC081S},
+	{"adc101s", ID_ADC101S},
+	{"adc121s", ID_ADC121S},
 	{}
 };
 MODULE_DEVICE_TABLE(spi, ad7476_id);

+ 157 - 11
drivers/iio/adc/axp20x_adc.c

@@ -35,8 +35,13 @@
 #define AXP20X_GPIO10_IN_RANGE_GPIO1_VAL(x)	(((x) & BIT(0)) << 1)
 
 #define AXP20X_ADC_RATE_MASK			GENMASK(7, 6)
+#define AXP813_V_I_ADC_RATE_MASK		GENMASK(5, 4)
+#define AXP813_ADC_RATE_MASK			(AXP20X_ADC_RATE_MASK | AXP813_V_I_ADC_RATE_MASK)
 #define AXP20X_ADC_RATE_HZ(x)			((ilog2((x) / 25) << 6) & AXP20X_ADC_RATE_MASK)
 #define AXP22X_ADC_RATE_HZ(x)			((ilog2((x) / 100) << 6) & AXP20X_ADC_RATE_MASK)
+#define AXP813_TS_GPIO0_ADC_RATE_HZ(x)		AXP20X_ADC_RATE_HZ(x)
+#define AXP813_V_I_ADC_RATE_HZ(x)		((ilog2((x) / 100) << 4) & AXP813_V_I_ADC_RATE_MASK)
+#define AXP813_ADC_RATE_HZ(x)			(AXP20X_ADC_RATE_HZ(x) | AXP813_V_I_ADC_RATE_HZ(x))
 
 #define AXP20X_ADC_CHANNEL(_channel, _name, _type, _reg)	\
 	{							\
@@ -95,6 +100,12 @@ enum axp22x_adc_channel_i {
 	AXP22X_BATT_DISCHRG_I,
 };
 
+enum axp813_adc_channel_v {
+	AXP813_TS_IN = 0,
+	AXP813_GPIO0_V,
+	AXP813_BATT_V,
+};
+
 static struct iio_map axp20x_maps[] = {
 	{
 		.consumer_dev_name = "axp20x-usb-power-supply",
@@ -197,6 +208,25 @@ static const struct iio_chan_spec axp22x_adc_channels[] = {
 			   AXP20X_BATT_DISCHRG_I_H),
 };
 
+static const struct iio_chan_spec axp813_adc_channels[] = {
+	{
+		.type = IIO_TEMP,
+		.address = AXP22X_PMIC_TEMP_H,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+				      BIT(IIO_CHAN_INFO_SCALE) |
+				      BIT(IIO_CHAN_INFO_OFFSET),
+		.datasheet_name = "pmic_temp",
+	},
+	AXP20X_ADC_CHANNEL(AXP813_GPIO0_V, "gpio0_v", IIO_VOLTAGE,
+			   AXP288_GP_ADC_H),
+	AXP20X_ADC_CHANNEL(AXP813_BATT_V, "batt_v", IIO_VOLTAGE,
+			   AXP20X_BATT_V_H),
+	AXP20X_ADC_CHANNEL(AXP22X_BATT_CHRG_I, "batt_chrg_i", IIO_CURRENT,
+			   AXP20X_BATT_CHRG_I_H),
+	AXP20X_ADC_CHANNEL(AXP22X_BATT_DISCHRG_I, "batt_dischrg_i", IIO_CURRENT,
+			   AXP20X_BATT_DISCHRG_I_H),
+};
+
 static int axp20x_adc_raw(struct iio_dev *indio_dev,
 			  struct iio_chan_spec const *chan, int *val)
 {
@@ -243,6 +273,18 @@ static int axp22x_adc_raw(struct iio_dev *indio_dev,
 	return IIO_VAL_INT;
 }
 
+static int axp813_adc_raw(struct iio_dev *indio_dev,
+			  struct iio_chan_spec const *chan, int *val)
+{
+	struct axp20x_adc_iio *info = iio_priv(indio_dev);
+
+	*val = axp20x_read_variable_width(info->regmap, chan->address, 12);
+	if (*val < 0)
+		return *val;
+
+	return IIO_VAL_INT;
+}
+
 static int axp20x_adc_scale_voltage(int channel, int *val, int *val2)
 {
 	switch (channel) {
@@ -273,6 +315,24 @@ static int axp20x_adc_scale_voltage(int channel, int *val, int *val2)
 	}
 }
 
+static int axp813_adc_scale_voltage(int channel, int *val, int *val2)
+{
+	switch (channel) {
+	case AXP813_GPIO0_V:
+		*val = 0;
+		*val2 = 800000;
+		return IIO_VAL_INT_PLUS_MICRO;
+
+	case AXP813_BATT_V:
+		*val = 1;
+		*val2 = 100000;
+		return IIO_VAL_INT_PLUS_MICRO;
+
+	default:
+		return -EINVAL;
+	}
+}
+
 static int axp20x_adc_scale_current(int channel, int *val, int *val2)
 {
 	switch (channel) {
@@ -342,6 +402,26 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val,
 	}
 }
 
+static int axp813_adc_scale(struct iio_chan_spec const *chan, int *val,
+			    int *val2)
+{
+	switch (chan->type) {
+	case IIO_VOLTAGE:
+		return axp813_adc_scale_voltage(chan->channel, val, val2);
+
+	case IIO_CURRENT:
+		*val = 1;
+		return IIO_VAL_INT;
+
+	case IIO_TEMP:
+		*val = 100;
+		return IIO_VAL_INT;
+
+	default:
+		return -EINVAL;
+	}
+}
+
 static int axp20x_adc_offset_voltage(struct iio_dev *indio_dev, int channel,
 				     int *val)
 {
@@ -365,7 +445,7 @@ static int axp20x_adc_offset_voltage(struct iio_dev *indio_dev, int channel,
 		return -EINVAL;
 	}
 
-	*val = !!(*val) * 700000;
+	*val = *val ? 700000 : 0;
 
 	return IIO_VAL_INT;
 }
@@ -425,6 +505,26 @@ static int axp22x_read_raw(struct iio_dev *indio_dev,
 	}
 }
 
+static int axp813_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan, int *val,
+			   int *val2, long mask)
+{
+	switch (mask) {
+	case IIO_CHAN_INFO_OFFSET:
+		*val = -2667;
+		return IIO_VAL_INT;
+
+	case IIO_CHAN_INFO_SCALE:
+		return axp813_adc_scale(chan, val, val2);
+
+	case IIO_CHAN_INFO_RAW:
+		return axp813_adc_raw(indio_dev, chan, val);
+
+	default:
+		return -EINVAL;
+	}
+}
+
 static int axp20x_write_raw(struct iio_dev *indio_dev,
 			    struct iio_chan_spec const *chan, int val, int val2,
 			    long mask)
@@ -442,15 +542,17 @@ static int axp20x_write_raw(struct iio_dev *indio_dev,
 	if (val != 0 && val != 700000)
 		return -EINVAL;
 
+	val = val ? 1 : 0;
+
 	switch (chan->channel) {
 	case AXP20X_GPIO0_V:
 		reg = AXP20X_GPIO10_IN_RANGE_GPIO0;
-		regval = AXP20X_GPIO10_IN_RANGE_GPIO0_VAL(!!val);
+		regval = AXP20X_GPIO10_IN_RANGE_GPIO0_VAL(val);
 		break;
 
 	case AXP20X_GPIO1_V:
 		reg = AXP20X_GPIO10_IN_RANGE_GPIO1;
-		regval = AXP20X_GPIO10_IN_RANGE_GPIO1_VAL(!!val);
+		regval = AXP20X_GPIO10_IN_RANGE_GPIO1_VAL(val);
 		break;
 
 	default:
@@ -470,14 +572,29 @@ static const struct iio_info axp22x_adc_iio_info = {
 	.read_raw = axp22x_read_raw,
 };
 
-static int axp20x_adc_rate(int rate)
+static const struct iio_info axp813_adc_iio_info = {
+	.read_raw = axp813_read_raw,
+};
+
+static int axp20x_adc_rate(struct axp20x_adc_iio *info, int rate)
+{
+	return regmap_update_bits(info->regmap, AXP20X_ADC_RATE,
+				  AXP20X_ADC_RATE_MASK,
+				  AXP20X_ADC_RATE_HZ(rate));
+}
+
+static int axp22x_adc_rate(struct axp20x_adc_iio *info, int rate)
 {
-	return AXP20X_ADC_RATE_HZ(rate);
+	return regmap_update_bits(info->regmap, AXP20X_ADC_RATE,
+				  AXP20X_ADC_RATE_MASK,
+				  AXP22X_ADC_RATE_HZ(rate));
 }
 
-static int axp22x_adc_rate(int rate)
+static int axp813_adc_rate(struct axp20x_adc_iio *info, int rate)
 {
-	return AXP22X_ADC_RATE_HZ(rate);
+	return regmap_update_bits(info->regmap, AXP813_ADC_RATE,
+				 AXP813_ADC_RATE_MASK,
+				 AXP813_ADC_RATE_HZ(rate));
 }
 
 struct axp_data {
@@ -485,7 +602,8 @@ struct axp_data {
 	int				num_channels;
 	struct iio_chan_spec const	*channels;
 	unsigned long			adc_en1_mask;
-	int				(*adc_rate)(int rate);
+	int				(*adc_rate)(struct axp20x_adc_iio *info,
+						    int rate);
 	bool				adc_en2;
 	struct iio_map			*maps;
 };
@@ -510,9 +628,28 @@ static const struct axp_data axp22x_data = {
 	.maps = axp22x_maps,
 };
 
+static const struct axp_data axp813_data = {
+	.iio_info = &axp813_adc_iio_info,
+	.num_channels = ARRAY_SIZE(axp813_adc_channels),
+	.channels = axp813_adc_channels,
+	.adc_en1_mask = AXP22X_ADC_EN1_MASK,
+	.adc_rate = axp813_adc_rate,
+	.adc_en2 = false,
+	.maps = axp22x_maps,
+};
+
+static const struct of_device_id axp20x_adc_of_match[] = {
+	{ .compatible = "x-powers,axp209-adc", .data = (void *)&axp20x_data, },
+	{ .compatible = "x-powers,axp221-adc", .data = (void *)&axp22x_data, },
+	{ .compatible = "x-powers,axp813-adc", .data = (void *)&axp813_data, },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, axp20x_adc_of_match);
+
 static const struct platform_device_id axp20x_adc_id_match[] = {
 	{ .name = "axp20x-adc", .driver_data = (kernel_ulong_t)&axp20x_data, },
 	{ .name = "axp22x-adc", .driver_data = (kernel_ulong_t)&axp22x_data, },
+	{ .name = "axp813-adc", .driver_data = (kernel_ulong_t)&axp813_data, },
 	{ /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(platform, axp20x_adc_id_match);
@@ -538,7 +675,16 @@ static int axp20x_probe(struct platform_device *pdev)
 	indio_dev->dev.of_node = pdev->dev.of_node;
 	indio_dev->modes = INDIO_DIRECT_MODE;
 
-	info->data = (struct axp_data *)platform_get_device_id(pdev)->driver_data;
+	if (!pdev->dev.of_node) {
+		const struct platform_device_id *id;
+
+		id = platform_get_device_id(pdev);
+		info->data = (struct axp_data *)id->driver_data;
+	} else {
+		struct device *dev = &pdev->dev;
+
+		info->data = (struct axp_data *)of_device_get_match_data(dev);
+	}
 
 	indio_dev->name = platform_get_device_id(pdev)->name;
 	indio_dev->info = info->data->iio_info;
@@ -554,8 +700,7 @@ static int axp20x_probe(struct platform_device *pdev)
 				   AXP20X_ADC_EN2_MASK, AXP20X_ADC_EN2_MASK);
 
 	/* Configure ADCs rate */
-	regmap_update_bits(info->regmap, AXP20X_ADC_RATE, AXP20X_ADC_RATE_MASK,
-			   info->data->adc_rate(100));
+	info->data->adc_rate(info, 100);
 
 	ret = iio_map_array_register(indio_dev, info->data->maps);
 	if (ret < 0) {
@@ -602,6 +747,7 @@ static int axp20x_remove(struct platform_device *pdev)
 static struct platform_driver axp20x_adc_driver = {
 	.driver = {
 		.name = "axp20x-adc",
+		.of_match_table = of_match_ptr(axp20x_adc_of_match),
 	},
 	.id_table = axp20x_adc_id_match,
 	.probe = axp20x_probe,

+ 0 - 4
drivers/iio/adc/ep93xx_adc.c

@@ -167,10 +167,6 @@ static int ep93xx_adc_probe(struct platform_device *pdev)
 	priv = iio_priv(iiodev);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "Cannot obtain memory resource\n");
-		return -ENXIO;
-	}
 	priv->base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(priv->base)) {
 		dev_err(&pdev->dev, "Cannot map memory resource\n");

+ 4 - 12
drivers/iio/adc/ti-adc161s626.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * ti-adc161s626.c - Texas Instruments ADC161S626 1-channel differential ADC
  *
@@ -5,17 +6,8 @@
  *  adc141s626 - 14-bit ADC
  *  adc161s626 - 16-bit ADC
  *
- * Copyright (C) 2016 Matt Ranostay <mranostay@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2016-2018
+ * Author: Matt Ranostay <matt.ranostay@konsulko.com>
  */
 
 #include <linux/module.h>
@@ -275,6 +267,6 @@ static struct spi_driver ti_adc_driver = {
 };
 module_spi_driver(ti_adc_driver);
 
-MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
 MODULE_DESCRIPTION("Texas Instruments ADC1x1S 1-channel differential ADC");
 MODULE_LICENSE("GPL");

+ 4 - 13
drivers/iio/chemical/ams-iaq-core.c

@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * ams-iaq-core.c - Support for AMS iAQ-Core VOC sensors
  *
- * Copyright (C) 2015 Matt Ranostay <mranostay@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
+ * Copyright (C) 2015, 2018
+ * Author: Matt Ranostay <matt.ranostay@konsulko.com>
  */
 
 #include <linux/module.h>
@@ -194,6 +185,6 @@ static struct i2c_driver ams_iaqcore_driver = {
 };
 module_i2c_driver(ams_iaqcore_driver);
 
-MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
 MODULE_DESCRIPTION("AMS iAQ-Core VOC sensors");
 MODULE_LICENSE("GPL v2");

+ 4 - 12
drivers/iio/chemical/atlas-ph-sensor.c

@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * atlas-ph-sensor.c - Support for Atlas Scientific OEM pH-SM sensor
  *
- * Copyright (C) 2015 Matt Ranostay <mranostay@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Matt Ranostay
+ * Author: Matt Ranostay <matt.ranostay@konsulko.com>
  */
 
 #include <linux/module.h>
@@ -689,6 +681,6 @@ static struct i2c_driver atlas_driver = {
 };
 module_i2c_driver(atlas_driver);
 
-MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
 MODULE_DESCRIPTION("Atlas Scientific pH-SM sensor");
 MODULE_LICENSE("GPL");

+ 5 - 5
drivers/iio/chemical/ccs811.c

@@ -32,7 +32,7 @@
 #define CCS811_ALG_RESULT_DATA	0x02
 #define CCS811_RAW_DATA		0x03
 #define CCS811_HW_ID		0x20
-#define CCS881_HW_ID_VALUE	0x81
+#define CCS811_HW_ID_VALUE	0x81
 #define CCS811_HW_VERSION	0x21
 #define CCS811_HW_VERSION_VALUE	0x10
 #define CCS811_HW_VERSION_MASK	0xF0
@@ -69,7 +69,7 @@ struct ccs811_reading {
 	__be16 voc;
 	u8 status;
 	u8 error;
-	__be16 resistance;
+	__be16 raw_data;
 } __attribute__((__packed__));
 
 struct ccs811_data {
@@ -213,12 +213,12 @@ static int ccs811_read_raw(struct iio_dev *indio_dev,
 
 		switch (chan->type) {
 		case IIO_VOLTAGE:
-			*val = be16_to_cpu(data->buffer.resistance) &
+			*val = be16_to_cpu(data->buffer.raw_data) &
 					   CCS811_VOLTAGE_MASK;
 			ret = IIO_VAL_INT;
 			break;
 		case IIO_CURRENT:
-			*val = be16_to_cpu(data->buffer.resistance) >> 10;
+			*val = be16_to_cpu(data->buffer.raw_data) >> 10;
 			ret = IIO_VAL_INT;
 			break;
 		case IIO_CONCENTRATION:
@@ -356,7 +356,7 @@ static int ccs811_probe(struct i2c_client *client,
 	if (ret < 0)
 		return ret;
 
-	if (ret != CCS881_HW_ID_VALUE) {
+	if (ret != CCS811_HW_ID_VALUE) {
 		dev_err(&client->dev, "hardware id doesn't match CCS81x\n");
 		return -ENODEV;
 	}

+ 4 - 13
drivers/iio/chemical/vz89x.c

@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * vz89x.c - Support for SGX Sensortech MiCS VZ89X VOC sensors
  *
- * Copyright (C) 2015 Matt Ranostay <mranostay@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
+ * Copyright (C) 2015-2018
+ * Author: Matt Ranostay <matt.ranostay@konsulko.com>
  */
 
 #include <linux/module.h>
@@ -419,6 +410,6 @@ static struct i2c_driver vz89x_driver = {
 };
 module_i2c_driver(vz89x_driver);
 
-MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
 MODULE_DESCRIPTION("SGX Sensortech MiCS VZ89X VOC sensors");
 MODULE_LICENSE("GPL v2");

+ 1 - 0
drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c

@@ -289,6 +289,7 @@ MODULE_DEVICE_TABLE(platform, cros_ec_sensors_ids);
 static struct platform_driver cros_ec_sensors_platform_driver = {
 	.driver = {
 		.name	= "cros-ec-sensors",
+		.pm	= &cros_ec_sensors_pm_ops,
 	},
 	.probe		= cros_ec_sensors_probe,
 	.id_table	= cros_ec_sensors_ids,

+ 49 - 0
drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c

@@ -446,5 +446,54 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
 }
 EXPORT_SYMBOL_GPL(cros_ec_sensors_core_write);
 
+static int __maybe_unused cros_ec_sensors_prepare(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+	struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+
+	if (st->curr_sampl_freq == 0)
+		return 0;
+
+	/*
+	 * If the sensors are sampled at high frequency, we will not be able to
+	 * sleep. Set sampling to a long period if necessary.
+	 */
+	if (st->curr_sampl_freq < CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY) {
+		mutex_lock(&st->cmd_lock);
+		st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
+		st->param.ec_rate.data = CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY;
+		cros_ec_motion_send_host_cmd(st, 0);
+		mutex_unlock(&st->cmd_lock);
+	}
+	return 0;
+}
+
+static void __maybe_unused cros_ec_sensors_complete(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+	struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+
+	if (st->curr_sampl_freq == 0)
+		return;
+
+	if (st->curr_sampl_freq < CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY) {
+		mutex_lock(&st->cmd_lock);
+		st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
+		st->param.ec_rate.data = st->curr_sampl_freq;
+		cros_ec_motion_send_host_cmd(st, 0);
+		mutex_unlock(&st->cmd_lock);
+	}
+}
+
+const struct dev_pm_ops cros_ec_sensors_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+	.prepare = cros_ec_sensors_prepare,
+	.complete = cros_ec_sensors_complete
+#endif
+};
+EXPORT_SYMBOL_GPL(cros_ec_sensors_pm_ops);
+
 MODULE_DESCRIPTION("ChromeOS EC sensor hub core functions");
 MODULE_LICENSE("GPL v2");

+ 2 - 0
drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h

@@ -169,6 +169,8 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
 			       struct iio_chan_spec const *chan,
 			       int val, int val2, long mask);
 
+extern const struct dev_pm_ops cros_ec_sensors_pm_ops;
+
 /* List of extended channel specification for all sensors */
 extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[];
 

+ 1 - 1
drivers/iio/dac/ad5380.c

@@ -158,7 +158,7 @@ static unsigned int ad5380_info_to_reg(struct iio_chan_spec const *chan,
 	long info)
 {
 	switch (info) {
-	case 0:
+	case IIO_CHAN_INFO_RAW:
 		return AD5380_REG_DATA(chan->address);
 	case IIO_CHAN_INFO_CALIBBIAS:
 		return AD5380_REG_OFFSET(chan->address);

+ 1 - 1
drivers/iio/dac/ad5764.c

@@ -168,7 +168,7 @@ static int ad5764_read(struct iio_dev *indio_dev, unsigned int reg,
 static int ad5764_chan_info_to_reg(struct iio_chan_spec const *chan, long info)
 {
 	switch (info) {
-	case 0:
+	case IIO_CHAN_INFO_RAW:
 		return AD5764_REG_DATA(chan->address);
 	case IIO_CHAN_INFO_CALIBBIAS:
 		return AD5764_REG_OFFSET(chan->address);

+ 17 - 10
drivers/iio/dummy/Kconfig

@@ -9,20 +9,24 @@ config IIO_DUMMY_EVGEN
 	tristate
 
 config IIO_SIMPLE_DUMMY
-       tristate "An example driver with no hardware requirements"
-       depends on IIO_SW_DEVICE
-       help
-	 Driver intended mainly as documentation for how to write
-	 a driver. May also be useful for testing userspace code
-	 without hardware.
+	tristate "An example driver with no hardware requirements"
+	depends on IIO_SW_DEVICE
+	help
+	  Driver intended mainly as documentation for how to write
+	  a driver. May also be useful for testing userspace code
+	  without hardware.
 
 if IIO_SIMPLE_DUMMY
 
 config IIO_SIMPLE_DUMMY_EVENTS
-       bool "Event generation support"
-       select IIO_DUMMY_EVGEN
-       help
-         Add some dummy events to the simple dummy driver.
+	bool "Event generation support"
+	select IIO_DUMMY_EVGEN
+	help
+	  Add some dummy events to the simple dummy driver.
+
+	  The purpose of this is to generate 'fake' event interrupts thus
+	  allowing that driver's code to be as close as possible to that
+	  a normal driver talking to hardware.
 
 config IIO_SIMPLE_DUMMY_BUFFER
 	bool "Buffered capture support"
@@ -32,6 +36,9 @@ config IIO_SIMPLE_DUMMY_BUFFER
 	help
 	  Add buffered data capture to the simple dummy driver.
 
+	  Buffer handling elements of industrial I/O reference driver.
+	  Uses the kfifo buffer.
+
 endif # IIO_SIMPLE_DUMMY
 
 endmenu

+ 1 - 1
drivers/iio/gyro/hid-sensor-gyro-3d.c

@@ -115,7 +115,7 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
 	*val = 0;
 	*val2 = 0;
 	switch (mask) {
-	case 0:
+	case IIO_CHAN_INFO_RAW:
 		hid_sensor_power_state(&gyro_state->common_attributes, true);
 		report_id = gyro_state->gyro[chan->scan_index].report_id;
 		address = gyro_3d_addresses[chan->scan_index];

+ 4 - 12
drivers/iio/health/max30100.c

@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * max30100.c - Support for MAX30100 heart rate and pulse oximeter sensor
  *
- * Copyright (C) 2015 Matt Ranostay <mranostay@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015, 2018
+ * Author: Matt Ranostay <matt.ranostay@konsulko.com>
  *
  * TODO: enable pulse length controls via device tree properties
  */
@@ -518,6 +510,6 @@ static struct i2c_driver max30100_driver = {
 };
 module_i2c_driver(max30100_driver);
 
-MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
 MODULE_DESCRIPTION("MAX30100 heart rate and pulse oximeter sensor");
 MODULE_LICENSE("GPL");

+ 2 - 0
drivers/iio/humidity/Kconfig

@@ -68,10 +68,12 @@ config HTS221
 config HTS221_I2C
 	tristate
 	depends on HTS221
+	select REGMAP_I2C
 
 config HTS221_SPI
 	tristate
 	depends on HTS221
+	select REGMAP_SPI
 
 config HTU21
 	tristate "Measurement Specialties HTU21 humidity & temperature sensor"

+ 1 - 1
drivers/iio/humidity/dht11.c

@@ -159,7 +159,7 @@ static int dht11_decode(struct dht11 *dht11, int offset)
 	}
 
 	dht11->timestamp = ktime_get_boot_ns();
-	if (hum_int < 20) {  /* DHT22 */
+	if (hum_int < 4) {  /* DHT22: 100000 = (3*256+232)*100 */
 		dht11->temperature = (((temp_int & 0x7f) << 8) + temp_dec) *
 					((temp_int & 0x80) ? -100 : 100);
 		dht11->humidity = ((hum_int << 8) + hum_dec) * 100;

+ 4 - 12
drivers/iio/humidity/hdc100x.c

@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * hdc100x.c - Support for the TI HDC100x temperature + humidity sensors
  *
- * Copyright (C) 2015 Matt Ranostay <mranostay@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015, 2018
+ * Author: Matt Ranostay <matt.ranostay@konsulko.com>
  *
  * Datasheets:
  * http://www.ti.com/product/HDC1000/datasheet
@@ -449,6 +441,6 @@ static struct i2c_driver hdc100x_driver = {
 };
 module_i2c_driver(hdc100x_driver);
 
-MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
 MODULE_DESCRIPTION("TI HDC100x humidity and temperature sensor driver");
 MODULE_LICENSE("GPL");

+ 2 - 19
drivers/iio/humidity/hts221.h

@@ -15,21 +15,8 @@
 
 #include <linux/iio/iio.h>
 
-#define HTS221_RX_MAX_LENGTH	8
-#define HTS221_TX_MAX_LENGTH	8
-
 #define HTS221_DATA_SIZE	2
 
-struct hts221_transfer_buffer {
-	u8 rx_buf[HTS221_RX_MAX_LENGTH];
-	u8 tx_buf[HTS221_TX_MAX_LENGTH] ____cacheline_aligned;
-};
-
-struct hts221_transfer_function {
-	int (*read)(struct device *dev, u8 addr, int len, u8 *data);
-	int (*write)(struct device *dev, u8 addr, int len, u8 *data);
-};
-
 enum hts221_sensor_type {
 	HTS221_SENSOR_H,
 	HTS221_SENSOR_T,
@@ -44,8 +31,8 @@ struct hts221_sensor {
 struct hts221_hw {
 	const char *name;
 	struct device *dev;
+	struct regmap *regmap;
 
-	struct mutex lock;
 	struct iio_trigger *trig;
 	int irq;
 
@@ -53,16 +40,12 @@ struct hts221_hw {
 
 	bool enabled;
 	u8 odr;
-
-	const struct hts221_transfer_function *tf;
-	struct hts221_transfer_buffer tb;
 };
 
 extern const struct dev_pm_ops hts221_pm_ops;
 
-int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask, u8 val);
 int hts221_probe(struct device *dev, int irq, const char *name,
-		 const struct hts221_transfer_function *tf_ops);
+		 struct regmap *regmap);
 int hts221_set_enable(struct hts221_hw *hw, bool enable);
 int hts221_allocate_buffers(struct hts221_hw *hw);
 int hts221_allocate_trigger(struct hts221_hw *hw);

+ 20 - 19
drivers/iio/humidity/hts221_buffer.c

@@ -12,6 +12,8 @@
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/irqreturn.h>
+#include <linux/regmap.h>
+#include <linux/bitfield.h>
 
 #include <linux/iio/iio.h>
 #include <linux/iio/trigger.h>
@@ -38,12 +40,10 @@ static int hts221_trig_set_state(struct iio_trigger *trig, bool state)
 {
 	struct iio_dev *iio_dev = iio_trigger_get_drvdata(trig);
 	struct hts221_hw *hw = iio_priv(iio_dev);
-	int err;
-
-	err = hts221_write_with_mask(hw, HTS221_REG_DRDY_EN_ADDR,
-				     HTS221_REG_DRDY_EN_MASK, state);
 
-	return err < 0 ? err : 0;
+	return regmap_update_bits(hw->regmap, HTS221_REG_DRDY_EN_ADDR,
+				  HTS221_REG_DRDY_EN_MASK,
+				  FIELD_PREP(HTS221_REG_DRDY_EN_MASK, state));
 }
 
 static const struct iio_trigger_ops hts221_trigger_ops = {
@@ -53,15 +53,13 @@ static const struct iio_trigger_ops hts221_trigger_ops = {
 static irqreturn_t hts221_trigger_handler_thread(int irq, void *private)
 {
 	struct hts221_hw *hw = private;
-	u8 status;
-	int err;
+	int err, status;
 
-	err = hw->tf->read(hw->dev, HTS221_REG_STATUS_ADDR, sizeof(status),
-			   &status);
+	err = regmap_read(hw->regmap, HTS221_REG_STATUS_ADDR, &status);
 	if (err < 0)
 		return IRQ_HANDLED;
 
-	/* 
+	/*
 	 * H_DA bit (humidity data available) is routed to DRDY line.
 	 * Humidity sample is computed after temperature one.
 	 * Here we can assume data channels are both available if H_DA bit
@@ -102,8 +100,10 @@ int hts221_allocate_trigger(struct hts221_hw *hw)
 		break;
 	}
 
-	err = hts221_write_with_mask(hw, HTS221_REG_DRDY_HL_ADDR,
-				     HTS221_REG_DRDY_HL_MASK, irq_active_low);
+	err = regmap_update_bits(hw->regmap, HTS221_REG_DRDY_HL_ADDR,
+				 HTS221_REG_DRDY_HL_MASK,
+				 FIELD_PREP(HTS221_REG_DRDY_HL_MASK,
+					    irq_active_low));
 	if (err < 0)
 		return err;
 
@@ -114,9 +114,10 @@ int hts221_allocate_trigger(struct hts221_hw *hw)
 		open_drain = true;
 	}
 
-	err = hts221_write_with_mask(hw, HTS221_REG_DRDY_PP_OD_ADDR,
-				     HTS221_REG_DRDY_PP_OD_MASK,
-				     open_drain);
+	err = regmap_update_bits(hw->regmap, HTS221_REG_DRDY_PP_OD_ADDR,
+				 HTS221_REG_DRDY_PP_OD_MASK,
+				 FIELD_PREP(HTS221_REG_DRDY_PP_OD_MASK,
+					    open_drain));
 	if (err < 0)
 		return err;
 
@@ -171,15 +172,15 @@ static irqreturn_t hts221_buffer_handler_thread(int irq, void *p)
 
 	/* humidity data */
 	ch = &iio_dev->channels[HTS221_SENSOR_H];
-	err = hw->tf->read(hw->dev, ch->address, HTS221_DATA_SIZE,
-			   buffer);
+	err = regmap_bulk_read(hw->regmap, ch->address,
+			       buffer, HTS221_DATA_SIZE);
 	if (err < 0)
 		goto out;
 
 	/* temperature data */
 	ch = &iio_dev->channels[HTS221_SENSOR_T];
-	err = hw->tf->read(hw->dev, ch->address, HTS221_DATA_SIZE,
-			   buffer + HTS221_DATA_SIZE);
+	err = regmap_bulk_read(hw->regmap, ch->address,
+			       buffer + HTS221_DATA_SIZE, HTS221_DATA_SIZE);
 	if (err < 0)
 		goto out;
 

+ 52 - 80
drivers/iio/humidity/hts221_core.c

@@ -14,7 +14,8 @@
 #include <linux/iio/sysfs.h>
 #include <linux/delay.h>
 #include <linux/pm.h>
-#include <asm/unaligned.h>
+#include <linux/regmap.h>
+#include <linux/bitfield.h>
 
 #include "hts221.h"
 
@@ -131,38 +132,11 @@ static const struct iio_chan_spec hts221_channels[] = {
 	IIO_CHAN_SOFT_TIMESTAMP(2),
 };
 
-int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask, u8 val)
-{
-	u8 data;
-	int err;
-
-	mutex_lock(&hw->lock);
-
-	err = hw->tf->read(hw->dev, addr, sizeof(data), &data);
-	if (err < 0) {
-		dev_err(hw->dev, "failed to read %02x register\n", addr);
-		goto unlock;
-	}
-
-	data = (data & ~mask) | ((val << __ffs(mask)) & mask);
-
-	err = hw->tf->write(hw->dev, addr, sizeof(data), &data);
-	if (err < 0)
-		dev_err(hw->dev, "failed to write %02x register\n", addr);
-
-unlock:
-	mutex_unlock(&hw->lock);
-
-	return err;
-}
-
 static int hts221_check_whoami(struct hts221_hw *hw)
 {
-	u8 data;
-	int err;
+	int err, data;
 
-	err = hw->tf->read(hw->dev, HTS221_REG_WHOAMI_ADDR, sizeof(data),
-			   &data);
+	err = regmap_read(hw->regmap, HTS221_REG_WHOAMI_ADDR, &data);
 	if (err < 0) {
 		dev_err(hw->dev, "failed to read whoami register\n");
 		return err;
@@ -188,8 +162,10 @@ static int hts221_update_odr(struct hts221_hw *hw, u8 odr)
 	if (i == ARRAY_SIZE(hts221_odr_table))
 		return -EINVAL;
 
-	err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR,
-				     HTS221_ODR_MASK, hts221_odr_table[i].val);
+	err = regmap_update_bits(hw->regmap, HTS221_REG_CNTRL1_ADDR,
+				 HTS221_ODR_MASK,
+				 FIELD_PREP(HTS221_ODR_MASK,
+					    hts221_odr_table[i].val));
 	if (err < 0)
 		return err;
 
@@ -202,8 +178,8 @@ static int hts221_update_avg(struct hts221_hw *hw,
 			     enum hts221_sensor_type type,
 			     u16 val)
 {
-	int i, err;
 	const struct hts221_avg *avg = &hts221_avg_list[type];
+	int i, err, data;
 
 	for (i = 0; i < HTS221_AVG_DEPTH; i++)
 		if (avg->avg_avl[i] == val)
@@ -212,7 +188,9 @@ static int hts221_update_avg(struct hts221_hw *hw,
 	if (i == HTS221_AVG_DEPTH)
 		return -EINVAL;
 
-	err = hts221_write_with_mask(hw, avg->addr, avg->mask, i);
+	data = ((i << __ffs(avg->mask)) & avg->mask);
+	err = regmap_update_bits(hw->regmap, avg->addr,
+				 avg->mask, data);
 	if (err < 0)
 		return err;
 
@@ -274,8 +252,9 @@ int hts221_set_enable(struct hts221_hw *hw, bool enable)
 {
 	int err;
 
-	err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR,
-				     HTS221_ENABLE_MASK, enable);
+	err = regmap_update_bits(hw->regmap, HTS221_REG_CNTRL1_ADDR,
+				 HTS221_ENABLE_MASK,
+				 FIELD_PREP(HTS221_ENABLE_MASK, enable));
 	if (err < 0)
 		return err;
 
@@ -286,38 +265,35 @@ int hts221_set_enable(struct hts221_hw *hw, bool enable)
 
 static int hts221_parse_temp_caldata(struct hts221_hw *hw)
 {
-	int err, *slope, *b_gen;
+	int err, *slope, *b_gen, cal0, cal1;
 	s16 cal_x0, cal_x1, cal_y0, cal_y1;
-	u8 cal0, cal1;
+	__le16 val;
 
-	err = hw->tf->read(hw->dev, HTS221_REG_0T_CAL_Y_H,
-			   sizeof(cal0), &cal0);
+	err = regmap_read(hw->regmap, HTS221_REG_0T_CAL_Y_H, &cal0);
 	if (err < 0)
 		return err;
 
-	err = hw->tf->read(hw->dev, HTS221_REG_T1_T0_CAL_Y_H,
-			   sizeof(cal1), &cal1);
+	err = regmap_read(hw->regmap, HTS221_REG_T1_T0_CAL_Y_H, &cal1);
 	if (err < 0)
 		return err;
-	cal_y0 = (le16_to_cpu(cal1 & 0x3) << 8) | cal0;
+	cal_y0 = ((cal1 & 0x3) << 8) | cal0;
 
-	err = hw->tf->read(hw->dev, HTS221_REG_1T_CAL_Y_H,
-			   sizeof(cal0), &cal0);
+	err = regmap_read(hw->regmap, HTS221_REG_1T_CAL_Y_H, &cal0);
 	if (err < 0)
 		return err;
 	cal_y1 = (((cal1 & 0xc) >> 2) << 8) | cal0;
 
-	err = hw->tf->read(hw->dev, HTS221_REG_0T_CAL_X_L, sizeof(cal_x0),
-			   (u8 *)&cal_x0);
+	err = regmap_bulk_read(hw->regmap, HTS221_REG_0T_CAL_X_L,
+			       &val, sizeof(val));
 	if (err < 0)
 		return err;
-	cal_x0 = le16_to_cpu(cal_x0);
+	cal_x0 = le16_to_cpu(val);
 
-	err = hw->tf->read(hw->dev, HTS221_REG_1T_CAL_X_L, sizeof(cal_x1),
-			   (u8 *)&cal_x1);
+	err = regmap_bulk_read(hw->regmap, HTS221_REG_1T_CAL_X_L,
+			       &val, sizeof(val));
 	if (err < 0)
 		return err;
-	cal_x1 = le16_to_cpu(cal_x1);
+	cal_x1 = le16_to_cpu(val);
 
 	slope = &hw->sensors[HTS221_SENSOR_T].slope;
 	b_gen = &hw->sensors[HTS221_SENSOR_T].b_gen;
@@ -332,33 +308,31 @@ static int hts221_parse_temp_caldata(struct hts221_hw *hw)
 
 static int hts221_parse_rh_caldata(struct hts221_hw *hw)
 {
-	int err, *slope, *b_gen;
+	int err, *slope, *b_gen, data;
 	s16 cal_x0, cal_x1, cal_y0, cal_y1;
-	u8 data;
+	__le16 val;
 
-	err = hw->tf->read(hw->dev, HTS221_REG_0RH_CAL_Y_H, sizeof(data),
-			   &data);
+	err = regmap_read(hw->regmap, HTS221_REG_0RH_CAL_Y_H, &data);
 	if (err < 0)
 		return err;
 	cal_y0 = data;
 
-	err = hw->tf->read(hw->dev, HTS221_REG_1RH_CAL_Y_H, sizeof(data),
-			   &data);
+	err = regmap_read(hw->regmap, HTS221_REG_1RH_CAL_Y_H, &data);
 	if (err < 0)
 		return err;
 	cal_y1 = data;
 
-	err = hw->tf->read(hw->dev, HTS221_REG_0RH_CAL_X_H, sizeof(cal_x0),
-			   (u8 *)&cal_x0);
+	err = regmap_bulk_read(hw->regmap, HTS221_REG_0RH_CAL_X_H,
+			       &val, sizeof(val));
 	if (err < 0)
 		return err;
-	cal_x0 = le16_to_cpu(cal_x0);
+	cal_x0 = le16_to_cpu(val);
 
-	err = hw->tf->read(hw->dev, HTS221_REG_1RH_CAL_X_H, sizeof(cal_x1),
-			   (u8 *)&cal_x1);
+	err = regmap_bulk_read(hw->regmap, HTS221_REG_1RH_CAL_X_H,
+			       &val, sizeof(val));
 	if (err < 0)
 		return err;
-	cal_x1 = le16_to_cpu(cal_x1);
+	cal_x1 = le16_to_cpu(val);
 
 	slope = &hw->sensors[HTS221_SENSOR_H].slope;
 	b_gen = &hw->sensors[HTS221_SENSOR_H].b_gen;
@@ -431,7 +405,7 @@ static int hts221_get_sensor_offset(struct hts221_hw *hw,
 
 static int hts221_read_oneshot(struct hts221_hw *hw, u8 addr, int *val)
 {
-	u8 data[HTS221_DATA_SIZE];
+	__le16 data;
 	int err;
 
 	err = hts221_set_enable(hw, true);
@@ -440,13 +414,13 @@ static int hts221_read_oneshot(struct hts221_hw *hw, u8 addr, int *val)
 
 	msleep(50);
 
-	err = hw->tf->read(hw->dev, addr, sizeof(data), data);
+	err = regmap_bulk_read(hw->regmap, addr, &data, sizeof(data));
 	if (err < 0)
 		return err;
 
 	hts221_set_enable(hw, false);
 
-	*val = (s16)get_unaligned_le16(data);
+	*val = (s16)le16_to_cpu(data);
 
 	return IIO_VAL_INT;
 }
@@ -582,7 +556,7 @@ static const struct iio_info hts221_info = {
 static const unsigned long hts221_scan_masks[] = {0x3, 0x0};
 
 int hts221_probe(struct device *dev, int irq, const char *name,
-		 const struct hts221_transfer_function *tf_ops)
+		 struct regmap *regmap)
 {
 	struct iio_dev *iio_dev;
 	struct hts221_hw *hw;
@@ -599,9 +573,7 @@ int hts221_probe(struct device *dev, int irq, const char *name,
 	hw->name = name;
 	hw->dev = dev;
 	hw->irq = irq;
-	hw->tf = tf_ops;
-
-	mutex_init(&hw->lock);
+	hw->regmap = regmap;
 
 	err = hts221_check_whoami(hw);
 	if (err < 0)
@@ -616,8 +588,9 @@ int hts221_probe(struct device *dev, int irq, const char *name,
 	iio_dev->info = &hts221_info;
 
 	/* enable Block Data Update */
-	err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR,
-				     HTS221_BDU_MASK, 1);
+	err = regmap_update_bits(hw->regmap, HTS221_REG_CNTRL1_ADDR,
+				 HTS221_BDU_MASK,
+				 FIELD_PREP(HTS221_BDU_MASK, 1));
 	if (err < 0)
 		return err;
 
@@ -673,12 +646,10 @@ static int __maybe_unused hts221_suspend(struct device *dev)
 {
 	struct iio_dev *iio_dev = dev_get_drvdata(dev);
 	struct hts221_hw *hw = iio_priv(iio_dev);
-	int err;
 
-	err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR,
-				     HTS221_ENABLE_MASK, false);
-
-	return err < 0 ? err : 0;
+	return regmap_update_bits(hw->regmap, HTS221_REG_CNTRL1_ADDR,
+				  HTS221_ENABLE_MASK,
+				  FIELD_PREP(HTS221_ENABLE_MASK, false));
 }
 
 static int __maybe_unused hts221_resume(struct device *dev)
@@ -688,9 +659,10 @@ static int __maybe_unused hts221_resume(struct device *dev)
 	int err = 0;
 
 	if (hw->enabled)
-		err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR,
-					     HTS221_ENABLE_MASK, true);
-
+		err = regmap_update_bits(hw->regmap, HTS221_REG_CNTRL1_ADDR,
+					 HTS221_ENABLE_MASK,
+					 FIELD_PREP(HTS221_ENABLE_MASK,
+						    true));
 	return err;
 }
 

+ 18 - 46
drivers/iio/humidity/hts221_i2c.c

@@ -13,61 +13,33 @@
 #include <linux/acpi.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
-#include "hts221.h"
-
-#define I2C_AUTO_INCREMENT	0x80
-
-static int hts221_i2c_read(struct device *dev, u8 addr, int len, u8 *data)
-{
-	struct i2c_msg msg[2];
-	struct i2c_client *client = to_i2c_client(dev);
-
-	if (len > 1)
-		addr |= I2C_AUTO_INCREMENT;
-
-	msg[0].addr = client->addr;
-	msg[0].flags = client->flags;
-	msg[0].len = 1;
-	msg[0].buf = &addr;
-
-	msg[1].addr = client->addr;
-	msg[1].flags = client->flags | I2C_M_RD;
-	msg[1].len = len;
-	msg[1].buf = data;
-
-	return i2c_transfer(client->adapter, msg, 2);
-}
+#include <linux/regmap.h>
 
-static int hts221_i2c_write(struct device *dev, u8 addr, int len, u8 *data)
-{
-	u8 send[len + 1];
-	struct i2c_msg msg;
-	struct i2c_client *client = to_i2c_client(dev);
-
-	if (len > 1)
-		addr |= I2C_AUTO_INCREMENT;
-
-	send[0] = addr;
-	memcpy(&send[1], data, len * sizeof(u8));
-
-	msg.addr = client->addr;
-	msg.flags = client->flags;
-	msg.len = len + 1;
-	msg.buf = send;
+#include "hts221.h"
 
-	return i2c_transfer(client->adapter, &msg, 1);
-}
+#define HTS221_I2C_AUTO_INCREMENT	BIT(7)
 
-static const struct hts221_transfer_function hts221_transfer_fn = {
-	.read = hts221_i2c_read,
-	.write = hts221_i2c_write,
+static const struct regmap_config hts221_i2c_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.write_flag_mask = HTS221_I2C_AUTO_INCREMENT,
+	.read_flag_mask = HTS221_I2C_AUTO_INCREMENT,
 };
 
 static int hts221_i2c_probe(struct i2c_client *client,
 			    const struct i2c_device_id *id)
 {
+	struct regmap *regmap;
+
+	regmap = devm_regmap_init_i2c(client, &hts221_i2c_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+			(int)PTR_ERR(regmap));
+		return PTR_ERR(regmap);
+	}
+
 	return hts221_probe(&client->dev, client->irq,
-			    client->name, &hts221_transfer_fn);
+			    client->name, regmap);
 }
 
 static const struct acpi_device_id hts221_acpi_match[] = {

+ 19 - 62
drivers/iio/humidity/hts221_spi.c

@@ -12,76 +12,33 @@
 #include <linux/module.h>
 #include <linux/spi/spi.h>
 #include <linux/slab.h>
-#include "hts221.h"
-
-#define SENSORS_SPI_READ	0x80
-#define SPI_AUTO_INCREMENT	0x40
-
-static int hts221_spi_read(struct device *dev, u8 addr, int len, u8 *data)
-{
-	int err;
-	struct spi_device *spi = to_spi_device(dev);
-	struct iio_dev *iio_dev = spi_get_drvdata(spi);
-	struct hts221_hw *hw = iio_priv(iio_dev);
-
-	struct spi_transfer xfers[] = {
-		{
-			.tx_buf = hw->tb.tx_buf,
-			.bits_per_word = 8,
-			.len = 1,
-		},
-		{
-			.rx_buf = hw->tb.rx_buf,
-			.bits_per_word = 8,
-			.len = len,
-		}
-	};
-
-	if (len > 1)
-		addr |= SPI_AUTO_INCREMENT;
-	hw->tb.tx_buf[0] = addr | SENSORS_SPI_READ;
-
-	err = spi_sync_transfer(spi, xfers,  ARRAY_SIZE(xfers));
-	if (err < 0)
-		return err;
-
-	memcpy(data, hw->tb.rx_buf, len * sizeof(u8));
-
-	return len;
-}
-
-static int hts221_spi_write(struct device *dev, u8 addr, int len, u8 *data)
-{
-	struct spi_device *spi = to_spi_device(dev);
-	struct iio_dev *iio_dev = spi_get_drvdata(spi);
-	struct hts221_hw *hw = iio_priv(iio_dev);
-
-	struct spi_transfer xfers = {
-		.tx_buf = hw->tb.tx_buf,
-		.bits_per_word = 8,
-		.len = len + 1,
-	};
-
-	if (len >= HTS221_TX_MAX_LENGTH)
-		return -ENOMEM;
+#include <linux/regmap.h>
 
-	if (len > 1)
-		addr |= SPI_AUTO_INCREMENT;
-	hw->tb.tx_buf[0] = addr;
-	memcpy(&hw->tb.tx_buf[1], data, len);
+#include "hts221.h"
 
-	return spi_sync_transfer(spi, &xfers, 1);
-}
+#define HTS221_SPI_READ			BIT(7)
+#define HTS221_SPI_AUTO_INCREMENT	BIT(6)
 
-static const struct hts221_transfer_function hts221_transfer_fn = {
-	.read = hts221_spi_read,
-	.write = hts221_spi_write,
+static const struct regmap_config hts221_spi_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.write_flag_mask = HTS221_SPI_AUTO_INCREMENT,
+	.read_flag_mask = HTS221_SPI_READ | HTS221_SPI_AUTO_INCREMENT,
 };
 
 static int hts221_spi_probe(struct spi_device *spi)
 {
+	struct regmap *regmap;
+
+	regmap = devm_regmap_init_spi(spi, &hts221_spi_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(&spi->dev, "Failed to register spi regmap %d\n",
+			(int)PTR_ERR(regmap));
+		return PTR_ERR(regmap);
+	}
+
 	return hts221_probe(&spi->dev, spi->irq,
-			    spi->modalias, &hts221_transfer_fn);
+			    spi->modalias, regmap);
 }
 
 static const struct of_device_id hts221_spi_of_match[] = {

+ 22 - 7
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h

@@ -27,7 +27,7 @@ enum st_lsm6dsx_hw_id {
 	ST_LSM6DSX_MAX_ID,
 };
 
-#define ST_LSM6DSX_BUFF_SIZE		256
+#define ST_LSM6DSX_BUFF_SIZE		400
 #define ST_LSM6DSX_CHAN_SIZE		2
 #define ST_LSM6DSX_SAMPLE_SIZE		6
 #define ST_LSM6DSX_MAX_WORD_LEN		((32 / ST_LSM6DSX_SAMPLE_SIZE) * \
@@ -57,6 +57,20 @@ struct st_lsm6dsx_fifo_ops {
 	u8 th_wl;
 };
 
+/**
+ * struct st_lsm6dsx_hw_ts_settings - ST IMU hw timer settings
+ * @timer_en: Hw timer enable register info (addr + mask).
+ * @hr_timer: Hw timer resolution register info (addr + mask).
+ * @fifo_en: Hw timer FIFO enable register info (addr + mask).
+ * @decimator: Hw timer FIFO decimator register info (addr + mask).
+ */
+struct st_lsm6dsx_hw_ts_settings {
+	struct st_lsm6dsx_reg timer_en;
+	struct st_lsm6dsx_reg hr_timer;
+	struct st_lsm6dsx_reg fifo_en;
+	struct st_lsm6dsx_reg decimator;
+};
+
 /**
  * struct st_lsm6dsx_settings - ST IMU sensor settings
  * @wai: Sensor WhoAmI default value.
@@ -64,6 +78,7 @@ struct st_lsm6dsx_fifo_ops {
  * @id: List of hw id supported by the driver configuration.
  * @decimator: List of decimator register info (addr + mask).
  * @fifo_ops: Sensor hw FIFO parameters.
+ * @ts_settings: Hw timer related settings.
  */
 struct st_lsm6dsx_settings {
 	u8 wai;
@@ -71,6 +86,7 @@ struct st_lsm6dsx_settings {
 	enum st_lsm6dsx_hw_id id[ST_LSM6DSX_MAX_ID];
 	struct st_lsm6dsx_reg decimator[ST_LSM6DSX_MAX_ID];
 	struct st_lsm6dsx_fifo_ops fifo_ops;
+	struct st_lsm6dsx_hw_ts_settings ts_settings;
 };
 
 enum st_lsm6dsx_sensor_id {
@@ -94,8 +110,7 @@ enum st_lsm6dsx_fifo_mode {
  * @watermark: Sensor watermark level.
  * @sip: Number of samples in a given pattern.
  * @decimator: FIFO decimation factor.
- * @delta_ts: Delta time between two consecutive interrupts.
- * @ts: Latest timestamp from the interrupt handler.
+ * @ts_ref: Sensor timestamp reference for hw one.
  */
 struct st_lsm6dsx_sensor {
 	char name[32];
@@ -108,9 +123,7 @@ struct st_lsm6dsx_sensor {
 	u16 watermark;
 	u8 sip;
 	u8 decimator;
-
-	s64 delta_ts;
-	s64 ts;
+	s64 ts_ref;
 };
 
 /**
@@ -122,7 +135,8 @@ struct st_lsm6dsx_sensor {
  * @conf_lock: Mutex to prevent concurrent FIFO configuration update.
  * @fifo_mode: FIFO operating mode supported by the device.
  * @enable_mask: Enabled sensor bitmask.
- * @sip: Total number of samples (acc/gyro) in a given pattern.
+ * @ts_sip: Total number of timestamp samples in a given pattern.
+ * @sip: Total number of samples (acc/gyro/ts) in a given pattern.
  * @buff: Device read buffer.
  * @iio_devs: Pointers to acc/gyro iio_dev instances.
  * @settings: Pointer to the specific sensor settings in use.
@@ -137,6 +151,7 @@ struct st_lsm6dsx_hw {
 
 	enum st_lsm6dsx_fifo_mode fifo_mode;
 	u8 enable_mask;
+	u8 ts_sip;
 	u8 sip;
 
 	u8 *buff;

+ 102 - 59
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c

@@ -46,9 +46,13 @@
 #define ST_LSM6DSX_FIFO_ODR_MASK		GENMASK(6, 3)
 #define ST_LSM6DSX_FIFO_EMPTY_MASK		BIT(12)
 #define ST_LSM6DSX_REG_FIFO_OUTL_ADDR		0x3e
+#define ST_LSM6DSX_REG_TS_RESET_ADDR		0x42
 
 #define ST_LSM6DSX_MAX_FIFO_ODR_VAL		0x08
 
+#define ST_LSM6DSX_TS_SENSITIVITY		25000UL /* 25us */
+#define ST_LSM6DSX_TS_RESET_VAL			0xaa
+
 struct st_lsm6dsx_decimator_entry {
 	u8 decimator;
 	u8 val;
@@ -98,9 +102,10 @@ static void st_lsm6dsx_get_max_min_odr(struct st_lsm6dsx_hw *hw,
 
 static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
 {
+	u16 max_odr, min_odr, sip = 0, ts_sip = 0;
+	const struct st_lsm6dsx_reg *ts_dec_reg;
 	struct st_lsm6dsx_sensor *sensor;
-	u16 max_odr, min_odr, sip = 0;
-	int err, i;
+	int err = 0, i;
 	u8 data;
 
 	st_lsm6dsx_get_max_min_odr(hw, &max_odr, &min_odr);
@@ -119,6 +124,7 @@ static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
 			sensor->decimator = 0;
 			data = 0;
 		}
+		ts_sip = max_t(u16, ts_sip, sensor->sip);
 
 		dec_reg = &hw->settings->decimator[sensor->id];
 		if (dec_reg->addr) {
@@ -131,9 +137,23 @@ static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
 		}
 		sip += sensor->sip;
 	}
-	hw->sip = sip;
+	hw->sip = sip + ts_sip;
+	hw->ts_sip = ts_sip;
 
-	return 0;
+	/*
+	 * update hw ts decimator if necessary. Decimator for hw timestamp
+	 * is always 1 or 0 in order to have a ts sample for each data
+	 * sample in FIFO
+	 */
+	ts_dec_reg = &hw->settings->ts_settings.decimator;
+	if (ts_dec_reg->addr) {
+		int val, ts_dec = !!hw->ts_sip;
+
+		val = ST_LSM6DSX_SHIFT_VAL(ts_dec, ts_dec_reg->mask);
+		err = regmap_update_bits(hw->regmap, ts_dec_reg->addr,
+					 ts_dec_reg->mask, val);
+	}
+	return err;
 }
 
 int st_lsm6dsx_set_fifo_mode(struct st_lsm6dsx_hw *hw,
@@ -208,6 +228,28 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
 				 &wdata, sizeof(wdata));
 }
 
+static int st_lsm6dsx_reset_hw_ts(struct st_lsm6dsx_hw *hw)
+{
+	struct st_lsm6dsx_sensor *sensor;
+	int i, err;
+
+	/* reset hw ts counter */
+	err = regmap_write(hw->regmap, ST_LSM6DSX_REG_TS_RESET_ADDR,
+			   ST_LSM6DSX_TS_RESET_VAL);
+	if (err < 0)
+		return err;
+
+	for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
+		sensor = iio_priv(hw->iio_devs[i]);
+		/*
+		 * store enable buffer timestamp as reference for
+		 * hw timestamp
+		 */
+		sensor->ts_ref = iio_get_time_ns(hw->iio_devs[i]);
+	}
+	return 0;
+}
+
 /*
  * Set max bulk read to ST_LSM6DSX_MAX_WORD_LEN in order to avoid
  * a kmalloc for each bus access
@@ -231,6 +273,8 @@ static inline int st_lsm6dsx_read_block(struct st_lsm6dsx_hw *hw, u8 *data,
 	return 0;
 }
 
+#define ST_LSM6DSX_IIO_BUFF_SIZE	(ALIGN(ST_LSM6DSX_SAMPLE_SIZE, \
+					       sizeof(s64)) + sizeof(s64))
 /**
  * st_lsm6dsx_read_fifo() - LSM6DS3-LSM6DS3H-LSM6DSL-LSM6DSM read FIFO routine
  * @hw: Pointer to instance of struct st_lsm6dsx_hw.
@@ -243,11 +287,13 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
 {
 	u16 fifo_len, pattern_len = hw->sip * ST_LSM6DSX_SAMPLE_SIZE;
 	u16 fifo_diff_mask = hw->settings->fifo_ops.fifo_diff.mask;
-	int err, acc_sip, gyro_sip, read_len, samples, offset;
+	int err, acc_sip, gyro_sip, ts_sip, read_len, offset;
 	struct st_lsm6dsx_sensor *acc_sensor, *gyro_sensor;
-	s64 acc_ts, acc_delta_ts, gyro_ts, gyro_delta_ts;
-	u8 iio_buff[ALIGN(ST_LSM6DSX_SAMPLE_SIZE, sizeof(s64)) + sizeof(s64)];
+	u8 gyro_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
+	u8 acc_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
+	bool reset_ts = false;
 	__le16 fifo_status;
+	s64 ts = 0;
 
 	err = regmap_bulk_read(hw->regmap,
 			       hw->settings->fifo_ops.fifo_diff.addr,
@@ -260,23 +306,10 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
 
 	fifo_len = (le16_to_cpu(fifo_status) & fifo_diff_mask) *
 		   ST_LSM6DSX_CHAN_SIZE;
-	samples = fifo_len / ST_LSM6DSX_SAMPLE_SIZE;
 	fifo_len = (fifo_len / pattern_len) * pattern_len;
 
-	/*
-	 * compute delta timestamp between two consecutive samples
-	 * in order to estimate queueing time of data generated
-	 * by the sensor
-	 */
 	acc_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
-	acc_ts = acc_sensor->ts - acc_sensor->delta_ts;
-	acc_delta_ts = div_s64(acc_sensor->delta_ts * acc_sensor->decimator,
-			       samples);
-
 	gyro_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_GYRO]);
-	gyro_ts = gyro_sensor->ts - gyro_sensor->delta_ts;
-	gyro_delta_ts = div_s64(gyro_sensor->delta_ts * gyro_sensor->decimator,
-				samples);
 
 	for (read_len = 0; read_len < fifo_len; read_len += pattern_len) {
 		err = st_lsm6dsx_read_block(hw, hw->buff, pattern_len);
@@ -287,7 +320,7 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
 		 * Data are written to the FIFO with a specific pattern
 		 * depending on the configured ODRs. The first sequence of data
 		 * stored in FIFO contains the data of all enabled sensors
-		 * (e.g. Gx, Gy, Gz, Ax, Ay, Az), then data are repeated
+		 * (e.g. Gx, Gy, Gz, Ax, Ay, Az, Ts), then data are repeated
 		 * depending on the value of the decimation factor set for each
 		 * sensor.
 		 *
@@ -296,35 +329,65 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
 		 *   - gyroscope ODR = 208Hz, accelerometer ODR = 104Hz
 		 * Since the gyroscope ODR is twice the accelerometer one, the
 		 * following pattern is repeated every 9 samples:
-		 *   - Gx, Gy, Gz, Ax, Ay, Az, Gx, Gy, Gz
+		 *   - Gx, Gy, Gz, Ax, Ay, Az, Ts, Gx, Gy, Gz, Ts, Gx, ..
 		 */
 		gyro_sip = gyro_sensor->sip;
 		acc_sip = acc_sensor->sip;
+		ts_sip = hw->ts_sip;
 		offset = 0;
 
 		while (acc_sip > 0 || gyro_sip > 0) {
-			if (gyro_sip-- > 0) {
-				memcpy(iio_buff, &hw->buff[offset],
+			if (gyro_sip > 0) {
+				memcpy(gyro_buff, &hw->buff[offset],
 				       ST_LSM6DSX_SAMPLE_SIZE);
-				iio_push_to_buffers_with_timestamp(
-					hw->iio_devs[ST_LSM6DSX_ID_GYRO],
-					iio_buff, gyro_ts);
 				offset += ST_LSM6DSX_SAMPLE_SIZE;
-				gyro_ts += gyro_delta_ts;
 			}
-
-			if (acc_sip-- > 0) {
-				memcpy(iio_buff, &hw->buff[offset],
+			if (acc_sip > 0) {
+				memcpy(acc_buff, &hw->buff[offset],
 				       ST_LSM6DSX_SAMPLE_SIZE);
-				iio_push_to_buffers_with_timestamp(
-					hw->iio_devs[ST_LSM6DSX_ID_ACC],
-					iio_buff, acc_ts);
 				offset += ST_LSM6DSX_SAMPLE_SIZE;
-				acc_ts += acc_delta_ts;
 			}
+
+			if (ts_sip-- > 0) {
+				u8 data[ST_LSM6DSX_SAMPLE_SIZE];
+
+				memcpy(data, &hw->buff[offset], sizeof(data));
+				/*
+				 * hw timestamp is 3B long and it is stored
+				 * in FIFO using 6B as 4th FIFO data set
+				 * according to this schema:
+				 * B0 = ts[15:8], B1 = ts[23:16], B3 = ts[7:0]
+				 */
+				ts = data[1] << 16 | data[0] << 8 | data[3];
+				/*
+				 * check if hw timestamp engine is going to
+				 * reset (the sensor generates an interrupt
+				 * to signal the hw timestamp will reset in
+				 * 1.638s)
+				 */
+				if (!reset_ts && ts >= 0xff0000)
+					reset_ts = true;
+				ts *= ST_LSM6DSX_TS_SENSITIVITY;
+
+				offset += ST_LSM6DSX_SAMPLE_SIZE;
+			}
+
+			if (gyro_sip-- > 0)
+				iio_push_to_buffers_with_timestamp(
+					hw->iio_devs[ST_LSM6DSX_ID_GYRO],
+					gyro_buff, gyro_sensor->ts_ref + ts);
+			if (acc_sip-- > 0)
+				iio_push_to_buffers_with_timestamp(
+					hw->iio_devs[ST_LSM6DSX_ID_ACC],
+					acc_buff, acc_sensor->ts_ref + ts);
 		}
 	}
 
+	if (unlikely(reset_ts)) {
+		err = st_lsm6dsx_reset_hw_ts(hw);
+		if (err < 0)
+			return err;
+	}
 	return read_len;
 }
 
@@ -379,15 +442,12 @@ static int st_lsm6dsx_update_fifo(struct iio_dev *iio_dev, bool enable)
 		goto out;
 
 	if (hw->enable_mask) {
-		err = st_lsm6dsx_set_fifo_mode(hw, ST_LSM6DSX_FIFO_CONT);
+		/* reset hw ts counter */
+		err = st_lsm6dsx_reset_hw_ts(hw);
 		if (err < 0)
 			goto out;
 
-		/*
-		 * store enable buffer timestamp as reference to compute
-		 * first delta timestamp
-		 */
-		sensor->ts = iio_get_time_ns(iio_dev);
+		err = st_lsm6dsx_set_fifo_mode(hw, ST_LSM6DSX_FIFO_CONT);
 	}
 
 out:
@@ -399,25 +459,8 @@ out:
 static irqreturn_t st_lsm6dsx_handler_irq(int irq, void *private)
 {
 	struct st_lsm6dsx_hw *hw = private;
-	struct st_lsm6dsx_sensor *sensor;
-	int i;
-
-	if (!hw->sip)
-		return IRQ_NONE;
-
-	for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
-		sensor = iio_priv(hw->iio_devs[i]);
-
-		if (sensor->sip > 0) {
-			s64 timestamp;
-
-			timestamp = iio_get_time_ns(hw->iio_devs[i]);
-			sensor->delta_ts = timestamp - sensor->ts;
-			sensor->ts = timestamp;
-		}
-	}
 
-	return IRQ_WAKE_THREAD;
+	return hw->sip > 0 ? IRQ_WAKE_THREAD : IRQ_NONE;
 }
 
 static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private)

+ 100 - 4
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c

@@ -181,6 +181,24 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
 			},
 			.th_wl = 3, /* 1LSB = 2B */
 		},
+		.ts_settings = {
+			.timer_en = {
+				.addr = 0x58,
+				.mask = BIT(7),
+			},
+			.hr_timer = {
+				.addr = 0x5c,
+				.mask = BIT(4),
+			},
+			.fifo_en = {
+				.addr = 0x07,
+				.mask = BIT(7),
+			},
+			.decimator = {
+				.addr = 0x09,
+				.mask = GENMASK(5, 3),
+			},
+		},
 	},
 	{
 		.wai = 0x69,
@@ -209,6 +227,24 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
 			},
 			.th_wl = 3, /* 1LSB = 2B */
 		},
+		.ts_settings = {
+			.timer_en = {
+				.addr = 0x58,
+				.mask = BIT(7),
+			},
+			.hr_timer = {
+				.addr = 0x5c,
+				.mask = BIT(4),
+			},
+			.fifo_en = {
+				.addr = 0x07,
+				.mask = BIT(7),
+			},
+			.decimator = {
+				.addr = 0x09,
+				.mask = GENMASK(5, 3),
+			},
+		},
 	},
 	{
 		.wai = 0x6a,
@@ -238,6 +274,24 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
 			},
 			.th_wl = 3, /* 1LSB = 2B */
 		},
+		.ts_settings = {
+			.timer_en = {
+				.addr = 0x19,
+				.mask = BIT(5),
+			},
+			.hr_timer = {
+				.addr = 0x5c,
+				.mask = BIT(4),
+			},
+			.fifo_en = {
+				.addr = 0x07,
+				.mask = BIT(7),
+			},
+			.decimator = {
+				.addr = 0x09,
+				.mask = GENMASK(5, 3),
+			},
+		},
 	},
 };
 
@@ -630,6 +684,44 @@ static int st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw, u8 *drdy_reg)
 	return err;
 }
 
+static int st_lsm6dsx_init_hw_timer(struct st_lsm6dsx_hw *hw)
+{
+	const struct st_lsm6dsx_hw_ts_settings *ts_settings;
+	int err, val;
+
+	ts_settings = &hw->settings->ts_settings;
+	/* enable hw timestamp generation if necessary */
+	if (ts_settings->timer_en.addr) {
+		val = ST_LSM6DSX_SHIFT_VAL(1, ts_settings->timer_en.mask);
+		err = regmap_update_bits(hw->regmap,
+					 ts_settings->timer_en.addr,
+					 ts_settings->timer_en.mask, val);
+		if (err < 0)
+			return err;
+	}
+
+	/* enable high resolution for hw ts timer if necessary */
+	if (ts_settings->hr_timer.addr) {
+		val = ST_LSM6DSX_SHIFT_VAL(1, ts_settings->hr_timer.mask);
+		err = regmap_update_bits(hw->regmap,
+					 ts_settings->hr_timer.addr,
+					 ts_settings->hr_timer.mask, val);
+		if (err < 0)
+			return err;
+	}
+
+	/* enable ts queueing in FIFO if necessary */
+	if (ts_settings->fifo_en.addr) {
+		val = ST_LSM6DSX_SHIFT_VAL(1, ts_settings->fifo_en.mask);
+		err = regmap_update_bits(hw->regmap,
+					 ts_settings->fifo_en.addr,
+					 ts_settings->fifo_en.mask, val);
+		if (err < 0)
+			return err;
+	}
+	return 0;
+}
+
 static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
 {
 	u8 drdy_int_reg;
@@ -654,10 +746,14 @@ static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
 	if (err < 0)
 		return err;
 
-	return regmap_update_bits(hw->regmap, drdy_int_reg,
-				  ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK,
-				  FIELD_PREP(ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK,
-					     1));
+	err = regmap_update_bits(hw->regmap, drdy_int_reg,
+				 ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK,
+				 FIELD_PREP(ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK,
+					    1));
+	if (err < 0)
+		return err;
+
+	return st_lsm6dsx_init_hw_timer(hw);
 }
 
 static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,

+ 10 - 0
drivers/iio/light/Kconfig

@@ -275,6 +275,16 @@ config LTR501
 	 This driver can also be built as a module.  If so, the module
          will be called ltr501.
 
+config LV0104CS
+	tristate "LV0104CS Ambient Light Sensor"
+	depends on I2C
+	help
+	 Say Y here if you want to build support for the On Semiconductor
+	 LV0104CS ambient light sensor.
+
+	 To compile this driver as a module, choose M here:
+	 the module will be called lv0104cs.
+
 config MAX44000
 	tristate "MAX44000 Ambient and Infrared Proximity Sensor"
 	depends on I2C

+ 1 - 0
drivers/iio/light/Makefile

@@ -26,6 +26,7 @@ obj-$(CONFIG_ISL29125)		+= isl29125.o
 obj-$(CONFIG_JSA1212)		+= jsa1212.o
 obj-$(CONFIG_SENSORS_LM3533)	+= lm3533-als.o
 obj-$(CONFIG_LTR501)		+= ltr501.o
+obj-$(CONFIG_LV0104CS)		+= lv0104cs.o
 obj-$(CONFIG_MAX44000)		+= max44000.o
 obj-$(CONFIG_OPT3001)		+= opt3001.o
 obj-$(CONFIG_PA12203001)	+= pa12203001.o

+ 4 - 12
drivers/iio/light/apds9960.c

@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * apds9960.c - Support for Avago APDS9960 gesture/RGB/ALS/proximity sensor
  *
- * Copyright (C) 2015 Matt Ranostay <mranostay@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015, 2018
+ * Author: Matt Ranostay <matt.ranostay@konsulko.com>
  *
  * TODO: gesture + proximity calib offsets
  */
@@ -1141,6 +1133,6 @@ static struct i2c_driver apds9960_driver = {
 };
 module_i2c_driver(apds9960_driver);
 
-MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
 MODULE_DESCRIPTION("ADPS9960 Gesture/RGB/ALS/Proximity sensor");
 MODULE_LICENSE("GPL");

+ 1 - 0
drivers/iio/light/cros_ec_light_prox.c

@@ -276,6 +276,7 @@ MODULE_DEVICE_TABLE(platform, cros_ec_light_prox_ids);
 static struct platform_driver cros_ec_light_prox_platform_driver = {
 	.driver = {
 		.name	= "cros-ec-light-prox",
+		.pm	= &cros_ec_sensors_pm_ops,
 	},
 	.probe		= cros_ec_light_prox_probe,
 	.id_table	= cros_ec_light_prox_ids,

+ 1 - 1
drivers/iio/light/hid-sensor-als.c

@@ -97,7 +97,7 @@ static int als_read_raw(struct iio_dev *indio_dev,
 	*val = 0;
 	*val2 = 0;
 	switch (mask) {
-	case 0:
+	case IIO_CHAN_INFO_RAW:
 		switch (chan->scan_index) {
 		case  CHANNEL_SCAN_INDEX_INTENSITY:
 		case  CHANNEL_SCAN_INDEX_ILLUM:

+ 1 - 1
drivers/iio/light/lm3533-als.c

@@ -199,7 +199,7 @@ static int lm3533_als_read_raw(struct iio_dev *indio_dev,
 	int ret;
 
 	switch (mask) {
-	case 0:
+	case IIO_CHAN_INFO_RAW:
 		switch (chan->type) {
 		case IIO_LIGHT:
 			ret = lm3533_als_get_adc(indio_dev, false, val);

+ 531 - 0
drivers/iio/light/lv0104cs.c

@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * lv0104cs.c: LV0104CS Ambient Light Sensor Driver
+ *
+ * Copyright (C) 2018
+ * Author: Jeff LaBundy <jeff@labundy.com>
+ *
+ * 7-bit I2C slave address: 0x13
+ *
+ * Link to data sheet: http://www.onsemi.com/pub/Collateral/LV0104CS-D.PDF
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define LV0104CS_REGVAL_MEASURE		0xE0
+#define LV0104CS_REGVAL_SLEEP		0x00
+
+#define LV0104CS_SCALE_0_25X		0
+#define LV0104CS_SCALE_1X		1
+#define LV0104CS_SCALE_2X		2
+#define LV0104CS_SCALE_8X		3
+#define LV0104CS_SCALE_SHIFT		3
+
+#define LV0104CS_INTEG_12_5MS		0
+#define LV0104CS_INTEG_100MS		1
+#define LV0104CS_INTEG_200MS		2
+#define LV0104CS_INTEG_SHIFT		1
+
+#define LV0104CS_CALIBSCALE_UNITY	31
+
+struct lv0104cs_private {
+	struct i2c_client *client;
+	struct mutex lock;
+	u8 calibscale;
+	u8 scale;
+	u8 int_time;
+};
+
+struct lv0104cs_mapping {
+	int val;
+	int val2;
+	u8 regval;
+};
+
+static const struct lv0104cs_mapping lv0104cs_calibscales[] = {
+	{ 0, 666666, 0x81 },
+	{ 0, 800000, 0x82 },
+	{ 0, 857142, 0x83 },
+	{ 0, 888888, 0x84 },
+	{ 0, 909090, 0x85 },
+	{ 0, 923076, 0x86 },
+	{ 0, 933333, 0x87 },
+	{ 0, 941176, 0x88 },
+	{ 0, 947368, 0x89 },
+	{ 0, 952380, 0x8A },
+	{ 0, 956521, 0x8B },
+	{ 0, 960000, 0x8C },
+	{ 0, 962962, 0x8D },
+	{ 0, 965517, 0x8E },
+	{ 0, 967741, 0x8F },
+	{ 0, 969696, 0x90 },
+	{ 0, 971428, 0x91 },
+	{ 0, 972972, 0x92 },
+	{ 0, 974358, 0x93 },
+	{ 0, 975609, 0x94 },
+	{ 0, 976744, 0x95 },
+	{ 0, 977777, 0x96 },
+	{ 0, 978723, 0x97 },
+	{ 0, 979591, 0x98 },
+	{ 0, 980392, 0x99 },
+	{ 0, 981132, 0x9A },
+	{ 0, 981818, 0x9B },
+	{ 0, 982456, 0x9C },
+	{ 0, 983050, 0x9D },
+	{ 0, 983606, 0x9E },
+	{ 0, 984126, 0x9F },
+	{ 1, 0, 0x80 },
+	{ 1, 16129, 0xBF },
+	{ 1, 16666, 0xBE },
+	{ 1, 17241, 0xBD },
+	{ 1, 17857, 0xBC },
+	{ 1, 18518, 0xBB },
+	{ 1, 19230, 0xBA },
+	{ 1, 20000, 0xB9 },
+	{ 1, 20833, 0xB8 },
+	{ 1, 21739, 0xB7 },
+	{ 1, 22727, 0xB6 },
+	{ 1, 23809, 0xB5 },
+	{ 1, 24999, 0xB4 },
+	{ 1, 26315, 0xB3 },
+	{ 1, 27777, 0xB2 },
+	{ 1, 29411, 0xB1 },
+	{ 1, 31250, 0xB0 },
+	{ 1, 33333, 0xAF },
+	{ 1, 35714, 0xAE },
+	{ 1, 38461, 0xAD },
+	{ 1, 41666, 0xAC },
+	{ 1, 45454, 0xAB },
+	{ 1, 50000, 0xAA },
+	{ 1, 55555, 0xA9 },
+	{ 1, 62500, 0xA8 },
+	{ 1, 71428, 0xA7 },
+	{ 1, 83333, 0xA6 },
+	{ 1, 100000, 0xA5 },
+	{ 1, 125000, 0xA4 },
+	{ 1, 166666, 0xA3 },
+	{ 1, 250000, 0xA2 },
+	{ 1, 500000, 0xA1 },
+};
+
+static const struct lv0104cs_mapping lv0104cs_scales[] = {
+	{ 0, 250000, LV0104CS_SCALE_0_25X << LV0104CS_SCALE_SHIFT },
+	{ 1, 0, LV0104CS_SCALE_1X << LV0104CS_SCALE_SHIFT },
+	{ 2, 0, LV0104CS_SCALE_2X << LV0104CS_SCALE_SHIFT },
+	{ 8, 0, LV0104CS_SCALE_8X << LV0104CS_SCALE_SHIFT },
+};
+
+static const struct lv0104cs_mapping lv0104cs_int_times[] = {
+	{ 0, 12500, LV0104CS_INTEG_12_5MS << LV0104CS_INTEG_SHIFT },
+	{ 0, 100000, LV0104CS_INTEG_100MS << LV0104CS_INTEG_SHIFT },
+	{ 0, 200000, LV0104CS_INTEG_200MS << LV0104CS_INTEG_SHIFT },
+};
+
+static int lv0104cs_write_reg(struct i2c_client *client, u8 regval)
+{
+	int ret;
+
+	ret = i2c_master_send(client, (char *)&regval, sizeof(regval));
+	if (ret < 0)
+		return ret;
+	if (ret != sizeof(regval))
+		return -EIO;
+
+	return 0;
+}
+
+static int lv0104cs_read_adc(struct i2c_client *client, u16 *adc_output)
+{
+	__be16 regval;
+	int ret;
+
+	ret = i2c_master_recv(client, (char *)&regval, sizeof(regval));
+	if (ret < 0)
+		return ret;
+	if (ret != sizeof(regval))
+		return -EIO;
+
+	*adc_output = be16_to_cpu(regval);
+
+	return 0;
+}
+
+static int lv0104cs_get_lux(struct lv0104cs_private *lv0104cs,
+				int *val, int *val2)
+{
+	u8 regval = LV0104CS_REGVAL_MEASURE;
+	u16 adc_output;
+	int ret;
+
+	regval |= lv0104cs_scales[lv0104cs->scale].regval;
+	regval |= lv0104cs_int_times[lv0104cs->int_time].regval;
+	ret = lv0104cs_write_reg(lv0104cs->client, regval);
+	if (ret)
+		return ret;
+
+	/* wait for integration time to pass (with margin) */
+	switch (lv0104cs->int_time) {
+	case LV0104CS_INTEG_12_5MS:
+		msleep(50);
+		break;
+
+	case LV0104CS_INTEG_100MS:
+		msleep(150);
+		break;
+
+	case LV0104CS_INTEG_200MS:
+		msleep(250);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	ret = lv0104cs_read_adc(lv0104cs->client, &adc_output);
+	if (ret)
+		return ret;
+
+	ret = lv0104cs_write_reg(lv0104cs->client, LV0104CS_REGVAL_SLEEP);
+	if (ret)
+		return ret;
+
+	/* convert ADC output to lux */
+	switch (lv0104cs->scale) {
+	case LV0104CS_SCALE_0_25X:
+		*val = adc_output * 4;
+		*val2 = 0;
+		return 0;
+
+	case LV0104CS_SCALE_1X:
+		*val = adc_output;
+		*val2 = 0;
+		return 0;
+
+	case LV0104CS_SCALE_2X:
+		*val = adc_output / 2;
+		*val2 = (adc_output % 2) * 500000;
+		return 0;
+
+	case LV0104CS_SCALE_8X:
+		*val = adc_output / 8;
+		*val2 = (adc_output % 8) * 125000;
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static int lv0104cs_read_raw(struct iio_dev *indio_dev,
+				struct iio_chan_spec const *chan,
+				int *val, int *val2, long mask)
+{
+	struct lv0104cs_private *lv0104cs = iio_priv(indio_dev);
+	int ret;
+
+	if (chan->type != IIO_LIGHT)
+		return -EINVAL;
+
+	mutex_lock(&lv0104cs->lock);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_PROCESSED:
+		ret = lv0104cs_get_lux(lv0104cs, val, val2);
+		if (ret)
+			goto err_mutex;
+		ret = IIO_VAL_INT_PLUS_MICRO;
+		break;
+
+	case IIO_CHAN_INFO_CALIBSCALE:
+		*val = lv0104cs_calibscales[lv0104cs->calibscale].val;
+		*val2 = lv0104cs_calibscales[lv0104cs->calibscale].val2;
+		ret = IIO_VAL_INT_PLUS_MICRO;
+		break;
+
+	case IIO_CHAN_INFO_SCALE:
+		*val = lv0104cs_scales[lv0104cs->scale].val;
+		*val2 = lv0104cs_scales[lv0104cs->scale].val2;
+		ret = IIO_VAL_INT_PLUS_MICRO;
+		break;
+
+	case IIO_CHAN_INFO_INT_TIME:
+		*val = lv0104cs_int_times[lv0104cs->int_time].val;
+		*val2 = lv0104cs_int_times[lv0104cs->int_time].val2;
+		ret = IIO_VAL_INT_PLUS_MICRO;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+err_mutex:
+	mutex_unlock(&lv0104cs->lock);
+
+	return ret;
+}
+
+static int lv0104cs_set_calibscale(struct lv0104cs_private *lv0104cs,
+				int val, int val2)
+{
+	int calibscale = val * 1000000 + val2;
+	int floor, ceil, mid;
+	int ret, i, index;
+
+	/* round to nearest quantized calibscale (sensitivity) */
+	for (i = 0; i < ARRAY_SIZE(lv0104cs_calibscales) - 1; i++) {
+		floor = lv0104cs_calibscales[i].val * 1000000
+				+ lv0104cs_calibscales[i].val2;
+		ceil = lv0104cs_calibscales[i + 1].val * 1000000
+				+ lv0104cs_calibscales[i + 1].val2;
+		mid = (floor + ceil) / 2;
+
+		/* round down */
+		if (calibscale >= floor && calibscale < mid) {
+			index = i;
+			break;
+		}
+
+		/* round up */
+		if (calibscale >= mid && calibscale <= ceil) {
+			index = i + 1;
+			break;
+		}
+	}
+
+	if (i == ARRAY_SIZE(lv0104cs_calibscales) - 1)
+		return -EINVAL;
+
+	mutex_lock(&lv0104cs->lock);
+
+	/* set calibscale (sensitivity) */
+	ret = lv0104cs_write_reg(lv0104cs->client,
+			lv0104cs_calibscales[index].regval);
+	if (ret)
+		goto err_mutex;
+
+	lv0104cs->calibscale = index;
+
+err_mutex:
+	mutex_unlock(&lv0104cs->lock);
+
+	return ret;
+}
+
+static int lv0104cs_set_scale(struct lv0104cs_private *lv0104cs,
+				int val, int val2)
+{
+	int i;
+
+	/* hard matching */
+	for (i = 0; i < ARRAY_SIZE(lv0104cs_scales); i++) {
+		if (val != lv0104cs_scales[i].val)
+			continue;
+
+		if (val2 == lv0104cs_scales[i].val2)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(lv0104cs_scales))
+		return -EINVAL;
+
+	mutex_lock(&lv0104cs->lock);
+	lv0104cs->scale = i;
+	mutex_unlock(&lv0104cs->lock);
+
+	return 0;
+}
+
+static int lv0104cs_set_int_time(struct lv0104cs_private *lv0104cs,
+				int val, int val2)
+{
+	int i;
+
+	/* hard matching */
+	for (i = 0; i < ARRAY_SIZE(lv0104cs_int_times); i++) {
+		if (val != lv0104cs_int_times[i].val)
+			continue;
+
+		if (val2 == lv0104cs_int_times[i].val2)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(lv0104cs_int_times))
+		return -EINVAL;
+
+	mutex_lock(&lv0104cs->lock);
+	lv0104cs->int_time = i;
+	mutex_unlock(&lv0104cs->lock);
+
+	return 0;
+}
+
+static int lv0104cs_write_raw(struct iio_dev *indio_dev,
+				struct iio_chan_spec const *chan,
+				int val, int val2, long mask)
+{
+	struct lv0104cs_private *lv0104cs = iio_priv(indio_dev);
+
+	if (chan->type != IIO_LIGHT)
+		return -EINVAL;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_CALIBSCALE:
+		return lv0104cs_set_calibscale(lv0104cs, val, val2);
+
+	case IIO_CHAN_INFO_SCALE:
+		return lv0104cs_set_scale(lv0104cs, val, val2);
+
+	case IIO_CHAN_INFO_INT_TIME:
+		return lv0104cs_set_int_time(lv0104cs, val, val2);
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static ssize_t lv0104cs_show_calibscale_avail(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(lv0104cs_calibscales); i++) {
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06d ",
+				lv0104cs_calibscales[i].val,
+				lv0104cs_calibscales[i].val2);
+	}
+
+	buf[len - 1] = '\n';
+
+	return len;
+}
+
+static ssize_t lv0104cs_show_scale_avail(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(lv0104cs_scales); i++) {
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06d ",
+				lv0104cs_scales[i].val,
+				lv0104cs_scales[i].val2);
+	}
+
+	buf[len - 1] = '\n';
+
+	return len;
+}
+
+static ssize_t lv0104cs_show_int_time_avail(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(lv0104cs_int_times); i++) {
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06d ",
+				lv0104cs_int_times[i].val,
+				lv0104cs_int_times[i].val2);
+	}
+
+	buf[len - 1] = '\n';
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(calibscale_available, 0444,
+				lv0104cs_show_calibscale_avail, NULL, 0);
+static IIO_DEVICE_ATTR(scale_available, 0444,
+				lv0104cs_show_scale_avail, NULL, 0);
+static IIO_DEV_ATTR_INT_TIME_AVAIL(lv0104cs_show_int_time_avail);
+
+static struct attribute *lv0104cs_attributes[] = {
+	&iio_dev_attr_calibscale_available.dev_attr.attr,
+	&iio_dev_attr_scale_available.dev_attr.attr,
+	&iio_dev_attr_integration_time_available.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group lv0104cs_attribute_group = {
+	.attrs = lv0104cs_attributes,
+};
+
+static const struct iio_info lv0104cs_info = {
+	.attrs = &lv0104cs_attribute_group,
+	.read_raw = &lv0104cs_read_raw,
+	.write_raw = &lv0104cs_write_raw,
+};
+
+static const struct iio_chan_spec lv0104cs_channels[] = {
+	{
+		.type = IIO_LIGHT,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+				      BIT(IIO_CHAN_INFO_CALIBSCALE) |
+				      BIT(IIO_CHAN_INFO_SCALE) |
+				      BIT(IIO_CHAN_INFO_INT_TIME),
+	},
+};
+
+static int lv0104cs_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	struct iio_dev *indio_dev;
+	struct lv0104cs_private *lv0104cs;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*lv0104cs));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	lv0104cs = iio_priv(indio_dev);
+
+	i2c_set_clientdata(client, lv0104cs);
+	lv0104cs->client = client;
+
+	mutex_init(&lv0104cs->lock);
+
+	lv0104cs->calibscale = LV0104CS_CALIBSCALE_UNITY;
+	lv0104cs->scale = LV0104CS_SCALE_1X;
+	lv0104cs->int_time = LV0104CS_INTEG_200MS;
+
+	ret = lv0104cs_write_reg(lv0104cs->client,
+			lv0104cs_calibscales[LV0104CS_CALIBSCALE_UNITY].regval);
+	if (ret)
+		return ret;
+
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->channels = lv0104cs_channels;
+	indio_dev->num_channels = ARRAY_SIZE(lv0104cs_channels);
+	indio_dev->name = client->name;
+	indio_dev->info = &lv0104cs_info;
+
+	return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id lv0104cs_id[] = {
+	{ "lv0104cs", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, lv0104cs_id);
+
+static struct i2c_driver lv0104cs_i2c_driver = {
+	.driver = {
+		.name	= "lv0104cs",
+	},
+	.id_table	= lv0104cs_id,
+	.probe		= lv0104cs_probe,
+};
+module_i2c_driver(lv0104cs_i2c_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("LV0104CS Ambient Light Sensor Driver");
+MODULE_LICENSE("GPL");

+ 1 - 1
drivers/iio/magnetometer/hid-sensor-magn-3d.c

@@ -167,7 +167,7 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
 	*val = 0;
 	*val2 = 0;
 	switch (mask) {
-	case 0:
+	case IIO_CHAN_INFO_RAW:
 		hid_sensor_power_state(&magn_state->magn_flux_attributes, true);
 		report_id =
 			magn_state->magn[chan->address].report_id;

+ 21 - 0
drivers/iio/potentiometer/Kconfig

@@ -5,6 +5,16 @@
 
 menu "Digital potentiometers"
 
+config AD5272
+	tristate "Analog Devices AD5272 and similar Digital Potentiometer driver"
+	depends on I2C
+	help
+	  Say yes here to build support for the Analog Devices AD5272 and AD5274
+	  digital potentiometer chips.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ad5272.
+
 config DS1803
 	tristate "Maxim Integrated DS1803 Digital Potentiometer driver"
 	depends on I2C
@@ -37,6 +47,17 @@ config MAX5487
           To compile this driver as a module, choose M here: the
           module will be called max5487.
 
+config MCP4018
+	tristate "Microchip MCP4017/18/19 Digital Potentiometer driver"
+	depends on I2C
+	help
+	  Say yes here to build support for the Microchip
+	  MCP4017, MCP4018, MCP4019
+	  digital potentiometer chips.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mcp4018.
+
 config MCP4131
 	tristate "Microchip MCP413X/414X/415X/416X/423X/424X/425X/426X Digital Potentiometer driver"
 	depends on SPI

+ 2 - 0
drivers/iio/potentiometer/Makefile

@@ -4,9 +4,11 @@
 #
 
 # When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AD5272) += ad5272.o
 obj-$(CONFIG_DS1803) += ds1803.o
 obj-$(CONFIG_MAX5481) += max5481.o
 obj-$(CONFIG_MAX5487) += max5487.o
+obj-$(CONFIG_MCP4018) += mcp4018.o
 obj-$(CONFIG_MCP4131) += mcp4131.o
 obj-$(CONFIG_MCP4531) += mcp4531.o
 obj-$(CONFIG_TPL0102) += tpl0102.o

+ 231 - 0
drivers/iio/potentiometer/ad5272.c

@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Analog Devices AD5272 digital potentiometer driver
+ * Copyright (C) 2018 Phil Reid <preid@electromag.com.au>
+ *
+ * Datasheet: http://www.analog.com/media/en/technical-documentation/data-sheets/AD5272_5274.pdf
+ *
+ * DEVID	#Wipers	#Positions	Resistor Opts (kOhm)	i2c address
+ * ad5272	1	1024		20, 50, 100		01011xx
+ * ad5274	1	256		20, 100			01011xx
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+
+#define  AD5272_RDAC_WR  1
+#define  AD5272_RDAC_RD  2
+#define  AD5272_RESET    4
+#define  AD5272_CTL      7
+
+#define  AD5272_RDAC_WR_EN  BIT(1)
+
+struct ad5272_cfg {
+	int max_pos;
+	int kohms;
+	int shift;
+};
+
+enum ad5272_type {
+	AD5272_020,
+	AD5272_050,
+	AD5272_100,
+	AD5274_020,
+	AD5274_100,
+};
+
+static const struct ad5272_cfg ad5272_cfg[] = {
+	[AD5272_020] = { .max_pos = 1024, .kohms = 20 },
+	[AD5272_050] = { .max_pos = 1024, .kohms = 50 },
+	[AD5272_100] = { .max_pos = 1024, .kohms = 100 },
+	[AD5274_020] = { .max_pos = 256,  .kohms = 20,  .shift = 2 },
+	[AD5274_100] = { .max_pos = 256,  .kohms = 100, .shift = 2 },
+};
+
+struct ad5272_data {
+	struct i2c_client       *client;
+	struct mutex            lock;
+	const struct ad5272_cfg *cfg;
+	u8                      buf[2] ____cacheline_aligned;
+};
+
+static const struct iio_chan_spec ad5272_channel = {
+	.type = IIO_RESISTANCE,
+	.output = 1,
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+};
+
+static int ad5272_write(struct ad5272_data *data, int reg, int val)
+{
+	int ret;
+
+	data->buf[0] = (reg << 2) | ((val >> 8) & 0x3);
+	data->buf[1] = (u8)val;
+
+	mutex_lock(&data->lock);
+	ret = i2c_master_send(data->client, data->buf, sizeof(data->buf));
+	mutex_unlock(&data->lock);
+	return ret < 0 ? ret : 0;
+}
+
+static int ad5272_read(struct ad5272_data *data, int reg, int *val)
+{
+	int ret;
+
+	data->buf[0] = reg << 2;
+	data->buf[1] = 0;
+
+	mutex_lock(&data->lock);
+	ret = i2c_master_send(data->client, data->buf, sizeof(data->buf));
+	if (ret < 0)
+		goto error;
+
+	ret = i2c_master_recv(data->client, data->buf, sizeof(data->buf));
+	if (ret < 0)
+		goto error;
+
+	*val = ((data->buf[0] & 0x3) << 8) | data->buf[1];
+	ret = 0;
+error:
+	mutex_unlock(&data->lock);
+	return ret;
+}
+
+static int ad5272_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val, int *val2, long mask)
+{
+	struct ad5272_data *data = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW: {
+		ret = ad5272_read(data, AD5272_RDAC_RD, val);
+		*val = *val >> data->cfg->shift;
+		return ret ? ret : IIO_VAL_INT;
+	}
+	case IIO_CHAN_INFO_SCALE:
+		*val = 1000 * data->cfg->kohms;
+		*val2 = data->cfg->max_pos;
+		return IIO_VAL_FRACTIONAL;
+	}
+
+	return -EINVAL;
+}
+
+static int ad5272_write_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan,
+			    int val, int val2, long mask)
+{
+	struct ad5272_data *data = iio_priv(indio_dev);
+
+	if (mask != IIO_CHAN_INFO_RAW)
+		return -EINVAL;
+
+	if (val >= data->cfg->max_pos || val < 0 || val2)
+		return -EINVAL;
+
+	return ad5272_write(data, AD5272_RDAC_WR, val << data->cfg->shift);
+}
+
+static const struct iio_info ad5272_info = {
+	.read_raw = ad5272_read_raw,
+	.write_raw = ad5272_write_raw,
+};
+
+static int ad5272_reset(struct ad5272_data *data)
+{
+	struct gpio_desc *reset_gpio;
+
+	reset_gpio = devm_gpiod_get_optional(&data->client->dev, "reset",
+		GPIOD_OUT_LOW);
+	if (IS_ERR(reset_gpio))
+		return PTR_ERR(reset_gpio);
+
+	if (reset_gpio) {
+		udelay(1);
+		gpiod_set_value(reset_gpio, 1);
+	} else {
+		ad5272_write(data, AD5272_RESET, 0);
+	}
+	usleep_range(1000, 2000);
+
+	return 0;
+}
+
+static int ad5272_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct device *dev = &client->dev;
+	struct iio_dev *indio_dev;
+	struct ad5272_data *data;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, indio_dev);
+
+	data = iio_priv(indio_dev);
+	data->client = client;
+	mutex_init(&data->lock);
+	data->cfg = &ad5272_cfg[id->driver_data];
+
+	ret = ad5272_reset(data);
+	if (ret)
+		return ret;
+
+	ret = ad5272_write(data, AD5272_CTL, AD5272_RDAC_WR_EN);
+	if (ret < 0)
+		return -ENODEV;
+
+	indio_dev->dev.parent = dev;
+	indio_dev->info = &ad5272_info;
+	indio_dev->channels = &ad5272_channel;
+	indio_dev->num_channels = 1;
+	indio_dev->name = client->name;
+
+	return devm_iio_device_register(dev, indio_dev);
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id ad5272_dt_ids[] = {
+	{ .compatible = "adi,ad5272-020", .data = (void *)AD5272_020 },
+	{ .compatible = "adi,ad5272-050", .data = (void *)AD5272_050 },
+	{ .compatible = "adi,ad5272-100", .data = (void *)AD5272_100 },
+	{ .compatible = "adi,ad5274-020", .data = (void *)AD5274_020 },
+	{ .compatible = "adi,ad5274-100", .data = (void *)AD5274_100 },
+	{}
+};
+MODULE_DEVICE_TABLE(of, ad5272_dt_ids);
+#endif /* CONFIG_OF */
+
+static const struct i2c_device_id ad5272_id[] = {
+	{ "ad5272-020", AD5272_020 },
+	{ "ad5272-050", AD5272_050 },
+	{ "ad5272-100", AD5272_100 },
+	{ "ad5274-020", AD5274_020 },
+	{ "ad5274-100", AD5274_100 },
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, ad5272_id);
+
+static struct i2c_driver ad5272_driver = {
+	.driver = {
+		.name	= "ad5272",
+		.of_match_table = of_match_ptr(ad5272_dt_ids),
+	},
+	.probe		= ad5272_probe,
+	.id_table	= ad5272_id,
+};
+
+module_i2c_driver(ad5272_driver);
+
+MODULE_AUTHOR("Phil Reid <preid@electromag.com.au>");
+MODULE_DESCRIPTION("AD5272 digital potentiometer");
+MODULE_LICENSE("GPL v2");

+ 1 - 1
drivers/iio/potentiometer/ds1803.c

@@ -64,7 +64,7 @@ static int ds1803_read_raw(struct iio_dev *indio_dev,
 	struct ds1803_data *data = iio_priv(indio_dev);
 	int pot = chan->channel;
 	int ret;
-	u8 result[indio_dev->num_channels];
+	u8 result[ARRAY_SIZE(ds1803_channels)];
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:

+ 194 - 0
drivers/iio/potentiometer/mcp4018.c

@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Industrial I/O driver for Microchip digital potentiometers
+ * Copyright (c) 2018  Axentia Technologies AB
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * Datasheet: http://www.microchip.com/downloads/en/DeviceDoc/22147a.pdf
+ *
+ * DEVID	#Wipers	#Positions	Resistor Opts (kOhm)
+ * mcp4017	1	128		5, 10, 50, 100
+ * mcp4018	1	128		5, 10, 50, 100
+ * mcp4019	1	128		5, 10, 50, 100
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#define MCP4018_WIPER_MAX 127
+
+struct mcp4018_cfg {
+	int kohms;
+};
+
+enum mcp4018_type {
+	MCP4018_502,
+	MCP4018_103,
+	MCP4018_503,
+	MCP4018_104,
+};
+
+static const struct mcp4018_cfg mcp4018_cfg[] = {
+	[MCP4018_502] = { .kohms =   5, },
+	[MCP4018_103] = { .kohms =  10, },
+	[MCP4018_503] = { .kohms =  50, },
+	[MCP4018_104] = { .kohms = 100, },
+};
+
+struct mcp4018_data {
+	struct i2c_client *client;
+	const struct mcp4018_cfg *cfg;
+};
+
+static const struct iio_chan_spec mcp4018_channel = {
+	.type = IIO_RESISTANCE,
+	.indexed = 1,
+	.output = 1,
+	.channel = 0,
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+};
+
+static int mcp4018_read_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan,
+			    int *val, int *val2, long mask)
+{
+	struct mcp4018_data *data = iio_priv(indio_dev);
+	s32 ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = i2c_smbus_read_byte(data->client);
+		if (ret < 0)
+			return ret;
+		*val = ret;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		*val = 1000 * data->cfg->kohms;
+		*val2 = MCP4018_WIPER_MAX;
+		return IIO_VAL_FRACTIONAL;
+	}
+
+	return -EINVAL;
+}
+
+static int mcp4018_write_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     int val, int val2, long mask)
+{
+	struct mcp4018_data *data = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		if (val > MCP4018_WIPER_MAX || val < 0)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return i2c_smbus_write_byte(data->client, val);
+}
+
+static const struct iio_info mcp4018_info = {
+	.read_raw = mcp4018_read_raw,
+	.write_raw = mcp4018_write_raw,
+};
+
+#ifdef CONFIG_OF
+
+#define MCP4018_COMPATIBLE(of_compatible, cfg) {	\
+	.compatible = of_compatible,			\
+	.data = &mcp4018_cfg[cfg],			\
+}
+
+static const struct of_device_id mcp4018_of_match[] = {
+	MCP4018_COMPATIBLE("microchip,mcp4017-502", MCP4018_502),
+	MCP4018_COMPATIBLE("microchip,mcp4017-103", MCP4018_103),
+	MCP4018_COMPATIBLE("microchip,mcp4017-503", MCP4018_503),
+	MCP4018_COMPATIBLE("microchip,mcp4017-104", MCP4018_104),
+	MCP4018_COMPATIBLE("microchip,mcp4018-502", MCP4018_502),
+	MCP4018_COMPATIBLE("microchip,mcp4018-103", MCP4018_103),
+	MCP4018_COMPATIBLE("microchip,mcp4018-503", MCP4018_503),
+	MCP4018_COMPATIBLE("microchip,mcp4018-104", MCP4018_104),
+	MCP4018_COMPATIBLE("microchip,mcp4019-502", MCP4018_502),
+	MCP4018_COMPATIBLE("microchip,mcp4019-103", MCP4018_103),
+	MCP4018_COMPATIBLE("microchip,mcp4019-503", MCP4018_503),
+	MCP4018_COMPATIBLE("microchip,mcp4019-104", MCP4018_104),
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mcp4018_of_match);
+
+#endif
+
+static int mcp4018_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct device *dev = &client->dev;
+	struct mcp4018_data *data;
+	struct iio_dev *indio_dev;
+	const struct of_device_id *match;
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_BYTE)) {
+		dev_err(dev, "SMBUS Byte transfers not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+	data = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
+	data->client = client;
+
+	match = of_match_device(of_match_ptr(mcp4018_of_match), dev);
+	if (match)
+		data->cfg = of_device_get_match_data(dev);
+	else
+		data->cfg = &mcp4018_cfg[id->driver_data];
+
+	indio_dev->dev.parent = dev;
+	indio_dev->info = &mcp4018_info;
+	indio_dev->channels = &mcp4018_channel;
+	indio_dev->num_channels = 1;
+	indio_dev->name = client->name;
+
+	return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct i2c_device_id mcp4018_id[] = {
+	{ "mcp4017-502", MCP4018_502 },
+	{ "mcp4017-103", MCP4018_103 },
+	{ "mcp4017-503", MCP4018_503 },
+	{ "mcp4017-104", MCP4018_104 },
+	{ "mcp4018-502", MCP4018_502 },
+	{ "mcp4018-103", MCP4018_103 },
+	{ "mcp4018-503", MCP4018_503 },
+	{ "mcp4018-104", MCP4018_104 },
+	{ "mcp4019-502", MCP4018_502 },
+	{ "mcp4019-103", MCP4018_103 },
+	{ "mcp4019-503", MCP4018_503 },
+	{ "mcp4019-104", MCP4018_104 },
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, mcp4018_id);
+
+static struct i2c_driver mcp4018_driver = {
+	.driver = {
+		.name	= "mcp4018",
+		.of_match_table = of_match_ptr(mcp4018_of_match),
+	},
+	.probe		= mcp4018_probe,
+	.id_table	= mcp4018_id,
+};
+
+module_i2c_driver(mcp4018_driver);
+
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_DESCRIPTION("MCP4018 digital potentiometer");
+MODULE_LICENSE("GPL");

+ 4 - 12
drivers/iio/potentiometer/tpl0102.c

@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * tpl0102.c - Support for Texas Instruments digital potentiometers
  *
- * Copyright (C) 2016 Matt Ranostay <mranostay@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2016, 2018
+ * Author: Matt Ranostay <matt.ranostay@konsulko.com>
  *
  * TODO: enable/disable hi-z output control
  */
@@ -156,6 +148,6 @@ static struct i2c_driver tpl0102_driver = {
 
 module_i2c_driver(tpl0102_driver);
 
-MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
 MODULE_DESCRIPTION("TPL0102 digital potentiometer");
 MODULE_LICENSE("GPL");

+ 4 - 12
drivers/iio/potentiostat/lmp91000.c

@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * lmp91000.c - Support for Texas Instruments digital potentiostats
  *
- * Copyright (C) 2016 Matt Ranostay <mranostay@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2016, 2018
+ * Author: Matt Ranostay <matt.ranostay@konsulko.com>
  *
  * TODO: bias voltage + polarity control, and multiple chip support
  */
@@ -440,6 +432,6 @@ static struct i2c_driver lmp91000_driver = {
 };
 module_i2c_driver(lmp91000_driver);
 
-MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
 MODULE_DESCRIPTION("LMP91000 digital potentiostat");
 MODULE_LICENSE("GPL");

+ 1 - 1
drivers/iio/pressure/ms5611.h

@@ -63,7 +63,7 @@ struct ms5611_state {
 };
 
 int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
-                 const char* name, int type);
+		 const char *name, int type);
 int ms5611_remove(struct iio_dev *indio_dev);
 
 #endif /* _MS5611_H */

+ 4 - 13
drivers/iio/proximity/as3935.c

@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * as3935.c - Support for AS3935 Franklin lightning sensor
  *
- * Copyright (C) 2014 Matt Ranostay <mranostay@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
+ * Copyright (C) 2014, 2017-2018
+ * Author: Matt Ranostay <matt.ranostay@konsulko.com>
  */
 
 #include <linux/module.h>
@@ -502,6 +493,6 @@ static struct spi_driver as3935_driver = {
 };
 module_spi_driver(as3935_driver);
 
-MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
 MODULE_DESCRIPTION("AS3935 lightning sensor");
 MODULE_LICENSE("GPL");

+ 4 - 12
drivers/iio/proximity/pulsedlight-lidar-lite-v2.c

@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * pulsedlight-lidar-lite-v2.c - Support for PulsedLight LIDAR sensor
  *
- * Copyright (C) 2015 Matt Ranostay <mranostay@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015, 2017-2018
+ * Author: Matt Ranostay <matt.ranostay@konsulko.com>
  *
  * TODO: interrupt mode, and signal strength reporting
  */
@@ -377,6 +369,6 @@ static struct i2c_driver lidar_driver = {
 };
 module_i2c_driver(lidar_driver);
 
-MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
 MODULE_DESCRIPTION("PulsedLight LIDAR sensor");
 MODULE_LICENSE("GPL");

+ 20 - 5
drivers/iio/proximity/sx9500.c

@@ -32,9 +32,6 @@
 #define SX9500_DRIVER_NAME		"sx9500"
 #define SX9500_IRQ_NAME			"sx9500_event"
 
-#define SX9500_GPIO_INT			"interrupt"
-#define SX9500_GPIO_RESET		"reset"
-
 /* Register definitions. */
 #define SX9500_REG_IRQ_SRC		0x00
 #define SX9500_REG_STAT			0x01
@@ -866,26 +863,44 @@ static int sx9500_init_device(struct iio_dev *indio_dev)
 	return sx9500_init_compensation(indio_dev);
 }
 
+static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
+static const struct acpi_gpio_params interrupt_gpios = { 2, 0, false };
+
+static const struct acpi_gpio_mapping acpi_sx9500_gpios[] = {
+	{ "reset-gpios", &reset_gpios, 1 },
+	/*
+	 * Some platforms have a bug in ACPI GPIO description making IRQ
+	 * GPIO to be output only. Ask the GPIO core to ignore this limit.
+	 */
+	{ "interrupt-gpios", &interrupt_gpios, 1, ACPI_GPIO_QUIRK_NO_IO_RESTRICTION },
+	{ },
+};
+
 static void sx9500_gpio_probe(struct i2c_client *client,
 			      struct sx9500_data *data)
 {
 	struct gpio_desc *gpiod_int;
 	struct device *dev;
+	int ret;
 
 	if (!client)
 		return;
 
 	dev = &client->dev;
 
+	ret = devm_acpi_dev_add_driver_gpios(dev, acpi_sx9500_gpios);
+	if (ret)
+		dev_dbg(dev, "Unable to add GPIO mapping table\n");
+
 	if (client->irq <= 0) {
-		gpiod_int = devm_gpiod_get(dev, SX9500_GPIO_INT, GPIOD_IN);
+		gpiod_int = devm_gpiod_get(dev, "interrupt", GPIOD_IN);
 		if (IS_ERR(gpiod_int))
 			dev_err(dev, "gpio get irq failed\n");
 		else
 			client->irq = gpiod_to_irq(gpiod_int);
 	}
 
-	data->gpiod_rst = devm_gpiod_get(dev, SX9500_GPIO_RESET, GPIOD_OUT_HIGH);
+	data->gpiod_rst = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
 	if (IS_ERR(data->gpiod_rst)) {
 		dev_warn(dev, "gpio get reset pin failed\n");
 		data->gpiod_rst = NULL;

+ 12 - 0
drivers/iio/temperature/Kconfig

@@ -43,6 +43,18 @@ config MLX90614
 	  This driver can also be built as a module. If so, the module will
 	  be called mlx90614.
 
+config MLX90632
+	tristate "MLX90632 contact-less infrared sensor with medical accuracy"
+	depends on I2C
+	select REGMAP_I2C
+	help
+	  If you say yes here you get support for the Melexis
+	  MLX90632 contact-less infrared sensor with medical accuracy
+	  connected with I2C.
+
+	  This driver can also be built as a module. If so, the module will
+	  be called mlx90632.
+
 config TMP006
 	tristate "TMP006 infrared thermopile sensor"
 	depends on I2C

+ 1 - 0
drivers/iio/temperature/Makefile

@@ -6,6 +6,7 @@
 obj-$(CONFIG_HID_SENSOR_TEMP) += hid-sensor-temperature.o
 obj-$(CONFIG_MAXIM_THERMOCOUPLE) += maxim_thermocouple.o
 obj-$(CONFIG_MLX90614) += mlx90614.o
+obj-$(CONFIG_MLX90632) += mlx90632.o
 obj-$(CONFIG_TMP006) += tmp006.o
 obj-$(CONFIG_TMP007) += tmp007.o
 obj-$(CONFIG_TSYS01) += tsys01.o

+ 4 - 12
drivers/iio/temperature/maxim_thermocouple.c

@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * maxim_thermocouple.c  - Support for Maxim thermocouple chips
  *
- * Copyright (C) 2016 Matt Ranostay <mranostay@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2016-2018 Matt Ranostay
+ * Author: <matt.ranostay@konsulko.com>
  */
 
 #include <linux/module.h>
@@ -281,6 +273,6 @@ static struct spi_driver maxim_thermocouple_driver = {
 };
 module_spi_driver(maxim_thermocouple_driver);
 
-MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
 MODULE_DESCRIPTION("Maxim thermocouple sensors");
 MODULE_LICENSE("GPL");

+ 752 - 0
drivers/iio/temperature/mlx90632.c

@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mlx90632.c - Melexis MLX90632 contactless IR temperature sensor
+ *
+ * Copyright (c) 2017 Melexis <cmo@melexis.com>
+ *
+ * Driver for the Melexis MLX90632 I2C 16-bit IR thermopile sensor
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/math64.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* Memory sections addresses */
+#define MLX90632_ADDR_RAM	0x4000 /* Start address of ram */
+#define MLX90632_ADDR_EEPROM	0x2480 /* Start address of user eeprom */
+
+/* EEPROM addresses - used at startup */
+#define MLX90632_EE_CTRL	0x24d4 /* Control register initial value */
+#define MLX90632_EE_I2C_ADDR	0x24d5 /* I2C address register initial value */
+#define MLX90632_EE_VERSION	0x240b /* EEPROM version reg address */
+#define MLX90632_EE_P_R		0x240c /* P_R calibration register 32bit */
+#define MLX90632_EE_P_G		0x240e /* P_G calibration register 32bit */
+#define MLX90632_EE_P_T		0x2410 /* P_T calibration register 32bit */
+#define MLX90632_EE_P_O		0x2412 /* P_O calibration register 32bit */
+#define MLX90632_EE_Aa		0x2414 /* Aa calibration register 32bit */
+#define MLX90632_EE_Ab		0x2416 /* Ab calibration register 32bit */
+#define MLX90632_EE_Ba		0x2418 /* Ba calibration register 32bit */
+#define MLX90632_EE_Bb		0x241a /* Bb calibration register 32bit */
+#define MLX90632_EE_Ca		0x241c /* Ca calibration register 32bit */
+#define MLX90632_EE_Cb		0x241e /* Cb calibration register 32bit */
+#define MLX90632_EE_Da		0x2420 /* Da calibration register 32bit */
+#define MLX90632_EE_Db		0x2422 /* Db calibration register 32bit */
+#define MLX90632_EE_Ea		0x2424 /* Ea calibration register 32bit */
+#define MLX90632_EE_Eb		0x2426 /* Eb calibration register 32bit */
+#define MLX90632_EE_Fa		0x2428 /* Fa calibration register 32bit */
+#define MLX90632_EE_Fb		0x242a /* Fb calibration register 32bit */
+#define MLX90632_EE_Ga		0x242c /* Ga calibration register 32bit */
+
+#define MLX90632_EE_Gb		0x242e /* Gb calibration register 16bit */
+#define MLX90632_EE_Ka		0x242f /* Ka calibration register 16bit */
+
+#define MLX90632_EE_Ha		0x2481 /* Ha customer calib value reg 16bit */
+#define MLX90632_EE_Hb		0x2482 /* Hb customer calib value reg 16bit */
+
+/* Register addresses - volatile */
+#define MLX90632_REG_I2C_ADDR	0x3000 /* Chip I2C address register */
+
+/* Control register address - volatile */
+#define MLX90632_REG_CONTROL	0x3001 /* Control Register address */
+#define   MLX90632_CFG_PWR_MASK		GENMASK(2, 1) /* PowerMode Mask */
+/* PowerModes statuses */
+#define MLX90632_PWR_STATUS(ctrl_val) (ctrl_val << 1)
+#define MLX90632_PWR_STATUS_HALT MLX90632_PWR_STATUS(0) /* hold */
+#define MLX90632_PWR_STATUS_SLEEP_STEP MLX90632_PWR_STATUS(1) /* sleep step */
+#define MLX90632_PWR_STATUS_STEP MLX90632_PWR_STATUS(2) /* step */
+#define MLX90632_PWR_STATUS_CONTINUOUS MLX90632_PWR_STATUS(3) /* continuous */
+
+/* Device status register - volatile */
+#define MLX90632_REG_STATUS	0x3fff /* Device status register */
+#define   MLX90632_STAT_BUSY		BIT(10) /* Device busy indicator */
+#define   MLX90632_STAT_EE_BUSY		BIT(9) /* EEPROM busy indicator */
+#define   MLX90632_STAT_BRST		BIT(8) /* Brown out reset indicator */
+#define   MLX90632_STAT_CYCLE_POS	GENMASK(6, 2) /* Data position */
+#define   MLX90632_STAT_DATA_RDY	BIT(0) /* Data ready indicator */
+
+/* RAM_MEAS address-es for each channel */
+#define MLX90632_RAM_1(meas_num)	(MLX90632_ADDR_RAM + 3 * meas_num)
+#define MLX90632_RAM_2(meas_num)	(MLX90632_ADDR_RAM + 3 * meas_num + 1)
+#define MLX90632_RAM_3(meas_num)	(MLX90632_ADDR_RAM + 3 * meas_num + 2)
+
+/* Magic constants */
+#define MLX90632_ID_MEDICAL	0x0105 /* EEPROM DSPv5 Medical device id */
+#define MLX90632_ID_CONSUMER	0x0205 /* EEPROM DSPv5 Consumer device id */
+#define MLX90632_RESET_CMD	0x0006 /* Reset sensor (address or global) */
+#define MLX90632_REF_12		12LL /**< ResCtrlRef value of Ch 1 or Ch 2 */
+#define MLX90632_REF_3		12LL /**< ResCtrlRef value of Channel 3 */
+#define MLX90632_MAX_MEAS_NUM	31 /**< Maximum measurements in list */
+#define MLX90632_SLEEP_DELAY_MS 3000 /**< Autosleep delay */
+
+struct mlx90632_data {
+	struct i2c_client *client;
+	struct mutex lock; /* Multiple reads for single measurement */
+	struct regmap *regmap;
+	u16 emissivity;
+};
+
+static const struct regmap_range mlx90632_volatile_reg_range[] = {
+	regmap_reg_range(MLX90632_REG_I2C_ADDR, MLX90632_REG_CONTROL),
+	regmap_reg_range(MLX90632_REG_STATUS, MLX90632_REG_STATUS),
+	regmap_reg_range(MLX90632_RAM_1(0),
+			 MLX90632_RAM_3(MLX90632_MAX_MEAS_NUM)),
+};
+
+static const struct regmap_access_table mlx90632_volatile_regs_tbl = {
+	.yes_ranges = mlx90632_volatile_reg_range,
+	.n_yes_ranges = ARRAY_SIZE(mlx90632_volatile_reg_range),
+};
+
+static const struct regmap_range mlx90632_read_reg_range[] = {
+	regmap_reg_range(MLX90632_EE_VERSION, MLX90632_EE_Ka),
+	regmap_reg_range(MLX90632_EE_CTRL, MLX90632_EE_I2C_ADDR),
+	regmap_reg_range(MLX90632_EE_Ha, MLX90632_EE_Hb),
+	regmap_reg_range(MLX90632_REG_I2C_ADDR, MLX90632_REG_CONTROL),
+	regmap_reg_range(MLX90632_REG_STATUS, MLX90632_REG_STATUS),
+	regmap_reg_range(MLX90632_RAM_1(0),
+			 MLX90632_RAM_3(MLX90632_MAX_MEAS_NUM)),
+};
+
+static const struct regmap_access_table mlx90632_readable_regs_tbl = {
+	.yes_ranges = mlx90632_read_reg_range,
+	.n_yes_ranges = ARRAY_SIZE(mlx90632_read_reg_range),
+};
+
+static const struct regmap_range mlx90632_no_write_reg_range[] = {
+	regmap_reg_range(MLX90632_EE_VERSION, MLX90632_EE_Ka),
+	regmap_reg_range(MLX90632_RAM_1(0),
+			 MLX90632_RAM_3(MLX90632_MAX_MEAS_NUM)),
+};
+
+static const struct regmap_access_table mlx90632_writeable_regs_tbl = {
+	.no_ranges = mlx90632_no_write_reg_range,
+	.n_no_ranges = ARRAY_SIZE(mlx90632_no_write_reg_range),
+};
+
+static const struct regmap_config mlx90632_regmap = {
+	.reg_bits = 16,
+	.val_bits = 16,
+
+	.volatile_table = &mlx90632_volatile_regs_tbl,
+	.rd_table = &mlx90632_readable_regs_tbl,
+	.wr_table = &mlx90632_writeable_regs_tbl,
+
+	.use_single_rw = true,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
+	.cache_type = REGCACHE_RBTREE,
+};
+
+static s32 mlx90632_pwr_set_sleep_step(struct regmap *regmap)
+{
+	return regmap_update_bits(regmap, MLX90632_REG_CONTROL,
+				  MLX90632_CFG_PWR_MASK,
+				  MLX90632_PWR_STATUS_SLEEP_STEP);
+}
+
+static s32 mlx90632_pwr_continuous(struct regmap *regmap)
+{
+	return regmap_update_bits(regmap, MLX90632_REG_CONTROL,
+				  MLX90632_CFG_PWR_MASK,
+				  MLX90632_PWR_STATUS_CONTINUOUS);
+}
+
+/**
+ * mlx90632_perform_measurement - Trigger and retrieve current measurement cycle
+ * @data: pointer to mlx90632_data object containing regmap information
+ *
+ * Perform a measurement and return latest measurement cycle position reported
+ * by sensor. This is a blocking function for 500ms, as that is default sensor
+ * refresh rate.
+ */
+static int mlx90632_perform_measurement(struct mlx90632_data *data)
+{
+	int ret, tries = 100;
+	unsigned int reg_status;
+
+	ret = regmap_update_bits(data->regmap, MLX90632_REG_STATUS,
+				 MLX90632_STAT_DATA_RDY, 0);
+	if (ret < 0)
+		return ret;
+
+	while (tries-- > 0) {
+		ret = regmap_read(data->regmap, MLX90632_REG_STATUS,
+				  &reg_status);
+		if (ret < 0)
+			return ret;
+		if (reg_status & MLX90632_STAT_DATA_RDY)
+			break;
+		usleep_range(10000, 11000);
+	}
+
+	if (tries < 0) {
+		dev_err(&data->client->dev, "data not ready");
+		return -ETIMEDOUT;
+	}
+
+	return (reg_status & MLX90632_STAT_CYCLE_POS) >> 2;
+}
+
+static int mlx90632_channel_new_select(int perform_ret, uint8_t *channel_new,
+				       uint8_t *channel_old)
+{
+	switch (perform_ret) {
+	case 1:
+		*channel_new = 1;
+		*channel_old = 2;
+		break;
+	case 2:
+		*channel_new = 2;
+		*channel_old = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mlx90632_read_ambient_raw(struct regmap *regmap,
+				     s16 *ambient_new_raw, s16 *ambient_old_raw)
+{
+	int ret;
+	unsigned int read_tmp;
+
+	ret = regmap_read(regmap, MLX90632_RAM_3(1), &read_tmp);
+	if (ret < 0)
+		return ret;
+	*ambient_new_raw = (s16)read_tmp;
+
+	ret = regmap_read(regmap, MLX90632_RAM_3(2), &read_tmp);
+	if (ret < 0)
+		return ret;
+	*ambient_old_raw = (s16)read_tmp;
+
+	return ret;
+}
+
+static int mlx90632_read_object_raw(struct regmap *regmap,
+				    int perform_measurement_ret,
+				    s16 *object_new_raw, s16 *object_old_raw)
+{
+	int ret;
+	unsigned int read_tmp;
+	s16 read;
+	u8 channel = 0;
+	u8 channel_old = 0;
+
+	ret = mlx90632_channel_new_select(perform_measurement_ret, &channel,
+					  &channel_old);
+	if (ret != 0)
+		return ret;
+
+	ret = regmap_read(regmap, MLX90632_RAM_2(channel), &read_tmp);
+	if (ret < 0)
+		return ret;
+
+	read = (s16)read_tmp;
+
+	ret = regmap_read(regmap, MLX90632_RAM_1(channel), &read_tmp);
+	if (ret < 0)
+		return ret;
+	*object_new_raw = (read + (s16)read_tmp) / 2;
+
+	ret = regmap_read(regmap, MLX90632_RAM_2(channel_old), &read_tmp);
+	if (ret < 0)
+		return ret;
+	read = (s16)read_tmp;
+
+	ret = regmap_read(regmap, MLX90632_RAM_1(channel_old), &read_tmp);
+	if (ret < 0)
+		return ret;
+	*object_old_raw = (read + (s16)read_tmp) / 2;
+
+	return ret;
+}
+
+static int mlx90632_read_all_channel(struct mlx90632_data *data,
+				     s16 *ambient_new_raw, s16 *ambient_old_raw,
+				     s16 *object_new_raw, s16 *object_old_raw)
+{
+	s32 ret, measurement;
+
+	mutex_lock(&data->lock);
+	measurement = mlx90632_perform_measurement(data);
+	if (measurement < 0) {
+		ret = measurement;
+		goto read_unlock;
+	}
+	ret = mlx90632_read_ambient_raw(data->regmap, ambient_new_raw,
+					ambient_old_raw);
+	if (ret < 0)
+		goto read_unlock;
+
+	ret = mlx90632_read_object_raw(data->regmap, measurement,
+				       object_new_raw, object_old_raw);
+read_unlock:
+	mutex_unlock(&data->lock);
+	return ret;
+}
+
+static int mlx90632_read_ee_register(struct regmap *regmap, u16 reg_lsb,
+				     s32 *reg_value)
+{
+	s32 ret;
+	unsigned int read;
+	u32 value;
+
+	ret = regmap_read(regmap, reg_lsb, &read);
+	if (ret < 0)
+		return ret;
+
+	value = read;
+
+	ret = regmap_read(regmap, reg_lsb + 1, &read);
+	if (ret < 0)
+		return ret;
+
+	*reg_value = (read << 16) | (value & 0xffff);
+
+	return 0;
+}
+
+static s64 mlx90632_preprocess_temp_amb(s16 ambient_new_raw,
+					s16 ambient_old_raw, s16 Gb)
+{
+	s64 VR_Ta, kGb, tmp;
+
+	kGb = ((s64)Gb * 1000LL) >> 10ULL;
+	VR_Ta = (s64)ambient_old_raw * 1000000LL +
+		kGb * div64_s64(((s64)ambient_new_raw * 1000LL),
+			(MLX90632_REF_3));
+	tmp = div64_s64(
+			 div64_s64(((s64)ambient_new_raw * 1000000000000LL),
+				   (MLX90632_REF_3)), VR_Ta);
+	return div64_s64(tmp << 19ULL, 1000LL);
+}
+
+static s64 mlx90632_preprocess_temp_obj(s16 object_new_raw, s16 object_old_raw,
+					s16 ambient_new_raw,
+					s16 ambient_old_raw, s16 Ka)
+{
+	s64 VR_IR, kKa, tmp;
+
+	kKa = ((s64)Ka * 1000LL) >> 10ULL;
+	VR_IR = (s64)ambient_old_raw * 1000000LL +
+		kKa * div64_s64(((s64)ambient_new_raw * 1000LL),
+			(MLX90632_REF_3));
+	tmp = div64_s64(
+			div64_s64(((s64)((object_new_raw + object_old_raw) / 2)
+				   * 1000000000000LL), (MLX90632_REF_12)),
+			VR_IR);
+	return div64_s64((tmp << 19ULL), 1000LL);
+}
+
+static s32 mlx90632_calc_temp_ambient(s16 ambient_new_raw, s16 ambient_old_raw,
+				      s32 P_T, s32 P_R, s32 P_G, s32 P_O,
+				      s16 Gb)
+{
+	s64 Asub, Bsub, Ablock, Bblock, Cblock, AMB, sum;
+
+	AMB = mlx90632_preprocess_temp_amb(ambient_new_raw, ambient_old_raw,
+					   Gb);
+	Asub = ((s64)P_T * 10000000000LL) >> 44ULL;
+	Bsub = AMB - (((s64)P_R * 1000LL) >> 8ULL);
+	Ablock = Asub * (Bsub * Bsub);
+	Bblock = (div64_s64(Bsub * 10000000LL, P_G)) << 20ULL;
+	Cblock = ((s64)P_O * 10000000000LL) >> 8ULL;
+
+	sum = div64_s64(Ablock, 1000000LL) + Bblock + Cblock;
+
+	return div64_s64(sum, 10000000LL);
+}
+
+static s32 mlx90632_calc_temp_object_iteration(s32 prev_object_temp, s64 object,
+					       s64 TAdut, s32 Fa, s32 Fb,
+					       s32 Ga, s16 Ha, s16 Hb,
+					       u16 emissivity)
+{
+	s64 calcedKsTO, calcedKsTA, ir_Alpha, TAdut4, Alpha_corr;
+	s64 Ha_customer, Hb_customer;
+
+	Ha_customer = ((s64)Ha * 1000000LL) >> 14ULL;
+	Hb_customer = ((s64)Hb * 100) >> 10ULL;
+
+	calcedKsTO = ((s64)((s64)Ga * (prev_object_temp - 25 * 1000LL)
+			     * 1000LL)) >> 36LL;
+	calcedKsTA = ((s64)(Fb * (TAdut - 25 * 1000000LL))) >> 36LL;
+	Alpha_corr = div64_s64((((s64)(Fa * 10000000000LL) >> 46LL)
+				* Ha_customer), 1000LL);
+	Alpha_corr *= ((s64)(1 * 1000000LL + calcedKsTO + calcedKsTA));
+	Alpha_corr = emissivity * div64_s64(Alpha_corr, 100000LL);
+	Alpha_corr = div64_s64(Alpha_corr, 1000LL);
+	ir_Alpha = div64_s64((s64)object * 10000000LL, Alpha_corr);
+	TAdut4 = (div64_s64(TAdut, 10000LL) + 27315) *
+		(div64_s64(TAdut, 10000LL) + 27315) *
+		(div64_s64(TAdut, 10000LL)  + 27315) *
+		(div64_s64(TAdut, 10000LL) + 27315);
+
+	return (int_sqrt64(int_sqrt64(ir_Alpha * 1000000000000LL + TAdut4))
+		- 27315 - Hb_customer) * 10;
+}
+
+static s32 mlx90632_calc_temp_object(s64 object, s64 ambient, s32 Ea, s32 Eb,
+				     s32 Fa, s32 Fb, s32 Ga, s16 Ha, s16 Hb,
+				     u16 tmp_emi)
+{
+	s64 kTA, kTA0, TAdut;
+	s64 temp = 25000;
+	s8 i;
+
+	kTA = (Ea * 1000LL) >> 16LL;
+	kTA0 = (Eb * 1000LL) >> 8LL;
+	TAdut = div64_s64(((ambient - kTA0) * 1000000LL), kTA) + 25 * 1000000LL;
+
+	/* Iterations of calculation as described in datasheet */
+	for (i = 0; i < 5; ++i) {
+		temp = mlx90632_calc_temp_object_iteration(temp, object, TAdut,
+							   Fa, Fb, Ga, Ha, Hb,
+							   tmp_emi);
+	}
+	return temp;
+}
+
+static int mlx90632_calc_object_dsp105(struct mlx90632_data *data, int *val)
+{
+	s32 ret;
+	s32 Ea, Eb, Fa, Fb, Ga;
+	unsigned int read_tmp;
+	s16 Ha, Hb, Gb, Ka;
+	s16 ambient_new_raw, ambient_old_raw, object_new_raw, object_old_raw;
+	s64 object, ambient;
+
+	ret = mlx90632_read_ee_register(data->regmap, MLX90632_EE_Ea, &Ea);
+	if (ret < 0)
+		return ret;
+	ret = mlx90632_read_ee_register(data->regmap, MLX90632_EE_Eb, &Eb);
+	if (ret < 0)
+		return ret;
+	ret = mlx90632_read_ee_register(data->regmap, MLX90632_EE_Fa, &Fa);
+	if (ret < 0)
+		return ret;
+	ret = mlx90632_read_ee_register(data->regmap, MLX90632_EE_Fb, &Fb);
+	if (ret < 0)
+		return ret;
+	ret = mlx90632_read_ee_register(data->regmap, MLX90632_EE_Ga, &Ga);
+	if (ret < 0)
+		return ret;
+	ret = regmap_read(data->regmap, MLX90632_EE_Ha, &read_tmp);
+	if (ret < 0)
+		return ret;
+	Ha = (s16)read_tmp;
+	ret = regmap_read(data->regmap, MLX90632_EE_Hb, &read_tmp);
+	if (ret < 0)
+		return ret;
+	Hb = (s16)read_tmp;
+	ret = regmap_read(data->regmap, MLX90632_EE_Gb, &read_tmp);
+	if (ret < 0)
+		return ret;
+	Gb = (s16)read_tmp;
+	ret = regmap_read(data->regmap, MLX90632_EE_Ka, &read_tmp);
+	if (ret < 0)
+		return ret;
+	Ka = (s16)read_tmp;
+
+	ret = mlx90632_read_all_channel(data,
+					&ambient_new_raw, &ambient_old_raw,
+					&object_new_raw, &object_old_raw);
+	if (ret < 0)
+		return ret;
+
+	ambient = mlx90632_preprocess_temp_amb(ambient_new_raw,
+					       ambient_old_raw, Gb);
+	object = mlx90632_preprocess_temp_obj(object_new_raw,
+					      object_old_raw,
+					      ambient_new_raw,
+					      ambient_old_raw, Ka);
+
+	*val = mlx90632_calc_temp_object(object, ambient, Ea, Eb, Fa, Fb, Ga,
+					 Ha, Hb, data->emissivity);
+	return 0;
+}
+
+static int mlx90632_calc_ambient_dsp105(struct mlx90632_data *data, int *val)
+{
+	s32 ret;
+	unsigned int read_tmp;
+	s32 PT, PR, PG, PO;
+	s16 Gb;
+	s16 ambient_new_raw, ambient_old_raw;
+
+	ret = mlx90632_read_ee_register(data->regmap, MLX90632_EE_P_R, &PR);
+	if (ret < 0)
+		return ret;
+	ret = mlx90632_read_ee_register(data->regmap, MLX90632_EE_P_G, &PG);
+	if (ret < 0)
+		return ret;
+	ret = mlx90632_read_ee_register(data->regmap, MLX90632_EE_P_T, &PT);
+	if (ret < 0)
+		return ret;
+	ret = mlx90632_read_ee_register(data->regmap, MLX90632_EE_P_O, &PO);
+	if (ret < 0)
+		return ret;
+	ret = regmap_read(data->regmap, MLX90632_EE_Gb, &read_tmp);
+	if (ret < 0)
+		return ret;
+	Gb = (s16)read_tmp;
+
+	ret = mlx90632_read_ambient_raw(data->regmap, &ambient_new_raw,
+					&ambient_old_raw);
+	if (ret < 0)
+		return ret;
+	*val = mlx90632_calc_temp_ambient(ambient_new_raw, ambient_old_raw,
+					  PT, PR, PG, PO, Gb);
+	return ret;
+}
+
+static int mlx90632_read_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *channel, int *val,
+			     int *val2, long mask)
+{
+	struct mlx90632_data *data = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_PROCESSED:
+		switch (channel->channel2) {
+		case IIO_MOD_TEMP_AMBIENT:
+			ret = mlx90632_calc_ambient_dsp105(data, val);
+			if (ret < 0)
+				return ret;
+			return IIO_VAL_INT;
+		case IIO_MOD_TEMP_OBJECT:
+			ret = mlx90632_calc_object_dsp105(data, val);
+			if (ret < 0)
+				return ret;
+			return IIO_VAL_INT;
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_CALIBEMISSIVITY:
+		if (data->emissivity == 1000) {
+			*val = 1;
+			*val2 = 0;
+		} else {
+			*val = 0;
+			*val2 = data->emissivity * 1000;
+		}
+		return IIO_VAL_INT_PLUS_MICRO;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static int mlx90632_write_raw(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *channel, int val,
+			      int val2, long mask)
+{
+	struct mlx90632_data *data = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_CALIBEMISSIVITY:
+		/* Confirm we are within 0 and 1.0 */
+		if (val < 0 || val2 < 0 || val > 1 ||
+		    (val == 1 && val2 != 0))
+			return -EINVAL;
+		data->emissivity = val * 1000 + val2 / 1000;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct iio_chan_spec mlx90632_channels[] = {
+	{
+		.type = IIO_TEMP,
+		.modified = 1,
+		.channel2 = IIO_MOD_TEMP_AMBIENT,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+	},
+	{
+		.type = IIO_TEMP,
+		.modified = 1,
+		.channel2 = IIO_MOD_TEMP_OBJECT,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+			BIT(IIO_CHAN_INFO_CALIBEMISSIVITY),
+	},
+};
+
+static const struct iio_info mlx90632_info = {
+	.read_raw = mlx90632_read_raw,
+	.write_raw = mlx90632_write_raw,
+};
+
+static int mlx90632_sleep(struct mlx90632_data *data)
+{
+	regcache_mark_dirty(data->regmap);
+
+	dev_dbg(&data->client->dev, "Requesting sleep");
+	return mlx90632_pwr_set_sleep_step(data->regmap);
+}
+
+static int mlx90632_wakeup(struct mlx90632_data *data)
+{
+	int ret;
+
+	ret = regcache_sync(data->regmap);
+	if (ret < 0) {
+		dev_err(&data->client->dev,
+			"Failed to sync regmap registers: %d\n", ret);
+		return ret;
+	}
+
+	dev_dbg(&data->client->dev, "Requesting wake-up\n");
+	return mlx90632_pwr_continuous(data->regmap);
+}
+
+static int mlx90632_probe(struct i2c_client *client,
+			  const struct i2c_device_id *id)
+{
+	struct iio_dev *indio_dev;
+	struct mlx90632_data *mlx90632;
+	struct regmap *regmap;
+	int ret;
+	unsigned int read;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*mlx90632));
+	if (!indio_dev) {
+		dev_err(&client->dev, "Failed to allocate device\n");
+		return -ENOMEM;
+	}
+
+	regmap = devm_regmap_init_i2c(client, &mlx90632_regmap);
+	if (IS_ERR(regmap)) {
+		ret = PTR_ERR(regmap);
+		dev_err(&client->dev, "Failed to allocate regmap: %d\n", ret);
+		return ret;
+	}
+
+	mlx90632 = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
+	mlx90632->client = client;
+	mlx90632->regmap = regmap;
+
+	mutex_init(&mlx90632->lock);
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->name = id->name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &mlx90632_info;
+	indio_dev->channels = mlx90632_channels;
+	indio_dev->num_channels = ARRAY_SIZE(mlx90632_channels);
+
+	ret = mlx90632_wakeup(mlx90632);
+	if (ret < 0) {
+		dev_err(&client->dev, "Wakeup failed: %d\n", ret);
+		return ret;
+	}
+
+	ret = regmap_read(mlx90632->regmap, MLX90632_EE_VERSION, &read);
+	if (ret < 0) {
+		dev_err(&client->dev, "read of version failed: %d\n", ret);
+		return ret;
+	}
+	if (read == MLX90632_ID_MEDICAL) {
+		dev_dbg(&client->dev,
+			"Detected Medical EEPROM calibration %x\n", read);
+	} else if (read == MLX90632_ID_CONSUMER) {
+		dev_dbg(&client->dev,
+			"Detected Consumer EEPROM calibration %x\n", read);
+	} else {
+		dev_err(&client->dev,
+			"EEPROM version mismatch %x (expected %x or %x)\n",
+			read, MLX90632_ID_CONSUMER, MLX90632_ID_MEDICAL);
+		return -EPROTONOSUPPORT;
+	}
+
+	mlx90632->emissivity = 1000;
+
+	pm_runtime_disable(&client->dev);
+	ret = pm_runtime_set_active(&client->dev);
+	if (ret < 0) {
+		mlx90632_sleep(mlx90632);
+		return ret;
+	}
+	pm_runtime_enable(&client->dev);
+	pm_runtime_set_autosuspend_delay(&client->dev, MLX90632_SLEEP_DELAY_MS);
+	pm_runtime_use_autosuspend(&client->dev);
+
+	return iio_device_register(indio_dev);
+}
+
+static int mlx90632_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct mlx90632_data *data = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+
+	pm_runtime_disable(&client->dev);
+	pm_runtime_set_suspended(&client->dev);
+	pm_runtime_put_noidle(&client->dev);
+
+	mlx90632_sleep(data);
+
+	return 0;
+}
+
+static const struct i2c_device_id mlx90632_id[] = {
+	{ "mlx90632", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, mlx90632_id);
+
+static const struct of_device_id mlx90632_of_match[] = {
+	{ .compatible = "melexis,mlx90632" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, mlx90632_of_match);
+
+static int __maybe_unused mlx90632_pm_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct mlx90632_data *data = iio_priv(indio_dev);
+
+	return mlx90632_sleep(data);
+}
+
+static int __maybe_unused mlx90632_pm_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct mlx90632_data *data = iio_priv(indio_dev);
+
+	return mlx90632_wakeup(data);
+}
+
+static UNIVERSAL_DEV_PM_OPS(mlx90632_pm_ops, mlx90632_pm_suspend,
+			    mlx90632_pm_resume, NULL);
+
+static struct i2c_driver mlx90632_driver = {
+	.driver = {
+		.name	= "mlx90632",
+		.of_match_table = mlx90632_of_match,
+		.pm	= &mlx90632_pm_ops,
+	},
+	.probe = mlx90632_probe,
+	.remove = mlx90632_remove,
+	.id_table = mlx90632_id,
+};
+module_i2c_driver(mlx90632_driver);
+
+MODULE_AUTHOR("Crt Mori <cmo@melexis.com>");
+MODULE_DESCRIPTION("Melexis MLX90632 contactless Infra Red temperature sensor driver");
+MODULE_LICENSE("GPL v2");

+ 6 - 0
drivers/irqchip/Kconfig

@@ -51,6 +51,12 @@ config ARM_GIC_V3_ITS_PCI
 	depends on PCI_MSI
 	default ARM_GIC_V3_ITS
 
+config ARM_GIC_V3_ITS_FSL_MC
+	bool
+	depends on ARM_GIC_V3_ITS
+	depends on FSL_MC_BUS
+	default ARM_GIC_V3_ITS
+
 config ARM_NVIC
 	bool
 	select IRQ_DOMAIN

+ 1 - 0
drivers/irqchip/Makefile

@@ -30,6 +30,7 @@ obj-$(CONFIG_ARM_GIC_V2M)		+= irq-gic-v2m.o
 obj-$(CONFIG_ARM_GIC_V3)		+= irq-gic-v3.o irq-gic-common.o
 obj-$(CONFIG_ARM_GIC_V3_ITS)		+= irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
 obj-$(CONFIG_ARM_GIC_V3_ITS_PCI)	+= irq-gic-v3-its-pci-msi.o
+obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC)	+= irq-gic-v3-its-fsl-mc-msi.o
 obj-$(CONFIG_PARTITION_PERCPU)		+= irq-partition-percpu.o
 obj-$(CONFIG_HISILICON_IRQ_MBIGEN)	+= irq-mbigen.o
 obj-$(CONFIG_ARM_NVIC)			+= irq-nvic.o

+ 1 - 3
drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c → drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c

@@ -13,7 +13,7 @@
 #include <linux/msi.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
-#include "../include/mc.h"
+#include <linux/fsl/mc.h>
 
 static struct irq_chip its_msi_irq_chip = {
 	.name = "ITS-fMSI",
@@ -43,9 +43,7 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
 	 * NOTE: This device id corresponds to the IOMMU stream ID
 	 * associated with the DPRC object (ICID).
 	 */
-#ifdef GENERIC_MSI_DOMAIN_OPS
 	info->scratchpad[0].ul = mc_bus_dev->icid;
-#endif
 	msi_info = msi_get_domain_info(msi_domain->parent);
 	return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
 }

+ 14 - 4
drivers/staging/Kconfig

@@ -24,8 +24,6 @@ menuconfig STAGING
 
 if STAGING
 
-source "drivers/staging/irda/net/Kconfig"
-
 source "drivers/staging/ipx/Kconfig"
 
 source "drivers/staging/ncpfs/Kconfig"
@@ -114,12 +112,24 @@ source "drivers/staging/greybus/Kconfig"
 
 source "drivers/staging/vc04_services/Kconfig"
 
-source "drivers/staging/ccree/Kconfig"
-
 source "drivers/staging/typec/Kconfig"
 
 source "drivers/staging/vboxvideo/Kconfig"
 
 source "drivers/staging/pi433/Kconfig"
 
+source "drivers/staging/mt7621-pinctrl/Kconfig"
+
+source "drivers/staging/mt7621-gpio/Kconfig"
+
+source "drivers/staging/mt7621-spi/Kconfig"
+
+source "drivers/staging/mt7621-dma/Kconfig"
+
+source "drivers/staging/mt7621-mmc/Kconfig"
+
+source "drivers/staging/mt7621-eth/Kconfig"
+
+source "drivers/staging/mt7621-dts/Kconfig"
+
 endif # STAGING

+ 8 - 3
drivers/staging/Makefile

@@ -5,8 +5,6 @@ obj-y				+= media/
 obj-y				+= typec/
 obj-$(CONFIG_IPX)		+= ipx/
 obj-$(CONFIG_NCP_FS)		+= ncpfs/
-obj-$(CONFIG_IRDA)		+= irda/net/
-obj-$(CONFIG_IRDA)		+= irda/drivers/
 obj-$(CONFIG_PRISM2_USB)	+= wlan-ng/
 obj-$(CONFIG_COMEDI)		+= comedi/
 obj-$(CONFIG_FB_OLPC_DCON)	+= olpc_dcon/
@@ -49,6 +47,13 @@ obj-$(CONFIG_MOST)		+= most/
 obj-$(CONFIG_KS7010)		+= ks7010/
 obj-$(CONFIG_GREYBUS)		+= greybus/
 obj-$(CONFIG_BCM2835_VCHIQ)	+= vc04_services/
-obj-$(CONFIG_CRYPTO_DEV_CCREE)	+= ccree/
 obj-$(CONFIG_DRM_VBOXVIDEO)	+= vboxvideo/
 obj-$(CONFIG_PI433)		+= pi433/
+obj-$(CONFIG_SOC_MT7621)	+= mt7621-pci/
+obj-$(CONFIG_SOC_MT7621)	+= mt7621-pinctrl/
+obj-$(CONFIG_SOC_MT7621)	+= mt7621-gpio/
+obj-$(CONFIG_SOC_MT7621)	+= mt7621-spi/
+obj-$(CONFIG_SOC_MT7621)	+= mt7621-dma/
+obj-$(CONFIG_SOC_MT7621)	+= mt7621-mmc/
+obj-$(CONFIG_SOC_MT7621)	+= mt7621-eth/
+obj-$(CONFIG_SOC_MT7621)	+= mt7621-dts/

+ 1 - 1
drivers/staging/android/ashmem.c

@@ -321,7 +321,7 @@ out_unlock:
 static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
 {
 	struct ashmem_area *asma = file->private_data;
-	int ret;
+	loff_t ret;
 
 	mutex_lock(&ashmem_mutex);
 

+ 1 - 1
drivers/staging/android/ion/Kconfig

@@ -4,7 +4,7 @@ menuconfig ION
 	select GENERIC_ALLOCATOR
 	select DMA_SHARED_BUFFER
 	---help---
-	  Chose this option to enable the ION Memory Manager,
+	  Choose this option to enable the ION Memory Manager,
 	  used by Android to efficiently allocate buffers
 	  from userspace that can be shared between drivers.
 	  If you're not using Android its probably safe to

+ 3 - 23
drivers/staging/android/ion/ion.c

@@ -33,11 +33,6 @@
 static struct ion_device *internal_dev;
 static int heap_id;
 
-bool ion_buffer_cached(struct ion_buffer *buffer)
-{
-	return !!(buffer->flags & ION_FLAG_CACHED);
-}
-
 /* this function should only be called while dev->lock is held */
 static void ion_buffer_add(struct ion_device *dev,
 			   struct ion_buffer *buffer)
@@ -187,7 +182,7 @@ static struct sg_table *dup_sg_table(struct sg_table *table)
 	new_sg = new_table->sgl;
 	for_each_sg(table->sgl, sg, table->nents, i) {
 		memcpy(new_sg, sg, sizeof(*sg));
-		sg->dma_address = 0;
+		new_sg->dma_address = 0;
 		new_sg = sg_next(new_sg);
 	}
 
@@ -527,7 +522,6 @@ DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
 
 void ion_device_add_heap(struct ion_heap *heap)
 {
-	struct dentry *debug_file;
 	struct ion_device *dev = internal_dev;
 	int ret;
 
@@ -561,16 +555,8 @@ void ion_device_add_heap(struct ion_heap *heap)
 		char debug_name[64];
 
 		snprintf(debug_name, 64, "%s_shrink", heap->name);
-		debug_file = debugfs_create_file(debug_name,
-						 0644, dev->debug_root, heap,
-						 &debug_shrink_fops);
-		if (!debug_file) {
-			char buf[256], *path;
-
-			path = dentry_path(dev->debug_root, buf, 256);
-			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
-			       path, debug_name);
-		}
+		debugfs_create_file(debug_name, 0644, dev->debug_root,
+				    heap, &debug_shrink_fops);
 	}
 
 	dev->heap_cnt++;
@@ -599,12 +585,6 @@ static int ion_device_create(void)
 	}
 
 	idev->debug_root = debugfs_create_dir("ion", NULL);
-	if (!idev->debug_root) {
-		pr_err("ion: failed to create debugfs root directory.\n");
-		goto debugfs_done;
-	}
-
-debugfs_done:
 	idev->buffers = RB_ROOT;
 	mutex_init(&idev->buffer_lock);
 	init_rwsem(&idev->lock);

+ 1 - 21
drivers/staging/android/ion/ion.h

@@ -184,23 +184,6 @@ struct ion_heap {
 			  void *unused);
 };
 
-/**
- * ion_buffer_cached - this ion buffer is cached
- * @buffer:		buffer
- *
- * indicates whether this ion buffer is cached
- */
-bool ion_buffer_cached(struct ion_buffer *buffer);
-
-/**
- * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
- * @buffer:		buffer
- *
- * indicates whether userspace mappings of this buffer will be faulted
- * in, this can affect how buffers are allocated from the heap.
- */
-bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
-
 /**
  * ion_device_add_heap - adds a heap to the ion device
  * @heap:		the heap to add
@@ -311,7 +294,6 @@ size_t ion_heap_freelist_size(struct ion_heap *heap);
  * @gfp_mask:		gfp_mask to use from alloc
  * @order:		order of pages in the pool
  * @list:		plist node for list of pools
- * @cached:		it's cached pool or not
  *
  * Allows you to keep a pool of pre allocated pages to use from your heap.
  * Keeping a pool of pages that is ready for dma, ie any cached mapping have
@@ -321,7 +303,6 @@ size_t ion_heap_freelist_size(struct ion_heap *heap);
 struct ion_page_pool {
 	int high_count;
 	int low_count;
-	bool cached;
 	struct list_head high_items;
 	struct list_head low_items;
 	struct mutex mutex;
@@ -330,8 +311,7 @@ struct ion_page_pool {
 	struct plist_node list;
 };
 
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
-					   bool cached);
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
 void ion_page_pool_destroy(struct ion_page_pool *pool);
 struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
 void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);

+ 5 - 28
drivers/staging/android/ion/ion_page_pool.c

@@ -5,24 +5,15 @@
  * Copyright (C) 2011 Google, Inc.
  */
 
-#include <linux/debugfs.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/fs.h>
 #include <linux/list.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 
 #include "ion.h"
 
-static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
+static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 {
-	struct page *page = alloc_pages(pool->gfp_mask, pool->order);
-
-	if (!page)
-		return NULL;
-	return page;
+	return alloc_pages(pool->gfp_mask, pool->order);
 }
 
 static void ion_page_pool_free_pages(struct ion_page_pool *pool,
@@ -31,7 +22,7 @@ static void ion_page_pool_free_pages(struct ion_page_pool *pool,
 	__free_pages(page, pool->order);
 }
 
-static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
+static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
 {
 	mutex_lock(&pool->mutex);
 	if (PageHighMem(page)) {
@@ -42,7 +33,6 @@ static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
 		pool->low_count++;
 	}
 	mutex_unlock(&pool->mutex);
-	return 0;
 }
 
 static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
@@ -84,13 +74,9 @@ struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
 
 void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
 {
-	int ret;
-
 	BUG_ON(pool->order != compound_order(page));
 
-	ret = ion_page_pool_add(pool, page);
-	if (ret)
-		ion_page_pool_free_pages(pool, page);
+	ion_page_pool_add(pool, page);
 }
 
 static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
@@ -137,8 +123,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
 	return freed;
 }
 
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
-					   bool cached)
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
 {
 	struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
 
@@ -152,8 +137,6 @@ struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
 	pool->order = order;
 	mutex_init(&pool->mutex);
 	plist_node_init(&pool->list, order);
-	if (cached)
-		pool->cached = true;
 
 	return pool;
 }
@@ -162,9 +145,3 @@ void ion_page_pool_destroy(struct ion_page_pool *pool)
 {
 	kfree(pool);
 }
-
-static int __init ion_page_pool_init(void)
-{
-	return 0;
-}
-device_initcall(ion_page_pool_init);

+ 14 - 62
drivers/staging/android/ion/ion_system_heap.c

@@ -41,31 +41,16 @@ static inline unsigned int order_to_size(int order)
 
 struct ion_system_heap {
 	struct ion_heap heap;
-	struct ion_page_pool *uncached_pools[NUM_ORDERS];
-	struct ion_page_pool *cached_pools[NUM_ORDERS];
+	struct ion_page_pool *pools[NUM_ORDERS];
 };
 
-/**
- * The page from page-pool are all zeroed before. We need do cache
- * clean for cached buffer. The uncached buffer are always non-cached
- * since it's allocated. So no need for non-cached pages.
- */
 static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 				      struct ion_buffer *buffer,
 				      unsigned long order)
 {
-	bool cached = ion_buffer_cached(buffer);
-	struct ion_page_pool *pool;
-	struct page *page;
+	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
 
-	if (!cached)
-		pool = heap->uncached_pools[order_to_index(order)];
-	else
-		pool = heap->cached_pools[order_to_index(order)];
-
-	page = ion_page_pool_alloc(pool);
-
-	return page;
+	return ion_page_pool_alloc(pool);
 }
 
 static void free_buffer_page(struct ion_system_heap *heap,
@@ -73,7 +58,6 @@ static void free_buffer_page(struct ion_system_heap *heap,
 {
 	struct ion_page_pool *pool;
 	unsigned int order = compound_order(page);
-	bool cached = ion_buffer_cached(buffer);
 
 	/* go to system */
 	if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
@@ -81,10 +65,7 @@ static void free_buffer_page(struct ion_system_heap *heap,
 		return;
 	}
 
-	if (!cached)
-		pool = heap->uncached_pools[order_to_index(order)];
-	else
-		pool = heap->cached_pools[order_to_index(order)];
+	pool = heap->pools[order_to_index(order)];
 
 	ion_page_pool_free(pool, page);
 }
@@ -190,8 +171,7 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
 static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 				  int nr_to_scan)
 {
-	struct ion_page_pool *uncached_pool;
-	struct ion_page_pool *cached_pool;
+	struct ion_page_pool *pool;
 	struct ion_system_heap *sys_heap;
 	int nr_total = 0;
 	int i, nr_freed;
@@ -203,26 +183,15 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 		only_scan = 1;
 
 	for (i = 0; i < NUM_ORDERS; i++) {
-		uncached_pool = sys_heap->uncached_pools[i];
-		cached_pool = sys_heap->cached_pools[i];
+		pool = sys_heap->pools[i];
 
 		if (only_scan) {
-			nr_total += ion_page_pool_shrink(uncached_pool,
+			nr_total += ion_page_pool_shrink(pool,
 							 gfp_mask,
 							 nr_to_scan);
 
-			nr_total += ion_page_pool_shrink(cached_pool,
-							 gfp_mask,
-							 nr_to_scan);
 		} else {
-			nr_freed = ion_page_pool_shrink(uncached_pool,
-							gfp_mask,
-							nr_to_scan);
-			nr_to_scan -= nr_freed;
-			nr_total += nr_freed;
-			if (nr_to_scan <= 0)
-				break;
-			nr_freed = ion_page_pool_shrink(cached_pool,
+			nr_freed = ion_page_pool_shrink(pool,
 							gfp_mask,
 							nr_to_scan);
 			nr_to_scan -= nr_freed;
@@ -253,26 +222,16 @@ static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
 	struct ion_page_pool *pool;
 
 	for (i = 0; i < NUM_ORDERS; i++) {
-		pool = sys_heap->uncached_pools[i];
+		pool = sys_heap->pools[i];
 
-		seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
+		seq_printf(s, "%d order %u highmem pages %lu total\n",
 			   pool->high_count, pool->order,
 			   (PAGE_SIZE << pool->order) * pool->high_count);
-		seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
+		seq_printf(s, "%d order %u lowmem pages %lu total\n",
 			   pool->low_count, pool->order,
 			   (PAGE_SIZE << pool->order) * pool->low_count);
 	}
 
-	for (i = 0; i < NUM_ORDERS; i++) {
-		pool = sys_heap->cached_pools[i];
-
-		seq_printf(s, "%d order %u highmem pages cached %lu total\n",
-			   pool->high_count, pool->order,
-			   (PAGE_SIZE << pool->order) * pool->high_count);
-		seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
-			   pool->low_count, pool->order,
-			   (PAGE_SIZE << pool->order) * pool->low_count);
-	}
 	return 0;
 }
 
@@ -285,8 +244,7 @@ static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
 			ion_page_pool_destroy(pools[i]);
 }
 
-static int ion_system_heap_create_pools(struct ion_page_pool **pools,
-					bool cached)
+static int ion_system_heap_create_pools(struct ion_page_pool **pools)
 {
 	int i;
 	gfp_t gfp_flags = low_order_gfp_flags;
@@ -297,7 +255,7 @@ static int ion_system_heap_create_pools(struct ion_page_pool **pools,
 		if (orders[i] > 4)
 			gfp_flags = high_order_gfp_flags;
 
-		pool = ion_page_pool_create(gfp_flags, orders[i], cached);
+		pool = ion_page_pool_create(gfp_flags, orders[i]);
 		if (!pool)
 			goto err_create_pool;
 		pools[i] = pool;
@@ -320,18 +278,12 @@ static struct ion_heap *__ion_system_heap_create(void)
 	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
 	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
 
-	if (ion_system_heap_create_pools(heap->uncached_pools, false))
+	if (ion_system_heap_create_pools(heap->pools))
 		goto free_heap;
 
-	if (ion_system_heap_create_pools(heap->cached_pools, true))
-		goto destroy_uncached_pools;
-
 	heap->heap.debug_show = ion_system_heap_debug_show;
 	return &heap->heap;
 
-destroy_uncached_pools:
-	ion_system_heap_destroy_pools(heap->uncached_pools);
-
 free_heap:
 	kfree(heap);
 	return ERR_PTR(-ENOMEM);

+ 0 - 27
drivers/staging/ccree/Kconfig

@@ -1,27 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-config CRYPTO_DEV_CCREE_OLD
-	tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators"
-	depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA && BROKEN
-	default n
-	select CRYPTO_HASH
-	select CRYPTO_BLKCIPHER
-	select CRYPTO_DES
-	select CRYPTO_AEAD
-	select CRYPTO_AUTHENC
-	select CRYPTO_SHA1
-	select CRYPTO_MD5
-	select CRYPTO_SHA256
-	select CRYPTO_SHA512
-	select CRYPTO_HMAC
-	select CRYPTO_AES
-	select CRYPTO_CBC
-	select CRYPTO_ECB
-	select CRYPTO_CTR
-	select CRYPTO_XTS
-	help
-	  Say 'Y' to enable a driver for the Arm TrustZone CryptoCell
-	  C7xx. Currently only the CryptoCell 712 REE is supported.
-	  Choose this if you wish to use hardware acceleration of
-	  cryptographic operations on the system REE.
-	  If unsure say Y.

+ 0 - 7
drivers/staging/ccree/Makefile

@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-$(CONFIG_CRYPTO_DEV_CCREE_OLD) := ccree.o
-ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
-ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o
-ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
-ccree-$(CONFIG_PM) += cc_pm.o

+ 0 - 10
drivers/staging/ccree/TODO

@@ -1,10 +0,0 @@
-
-
-*************************************************************************
-*									*
-* Arm Trust Zone CryptoCell REE Linux driver upstreaming TODO items	*
-*									*
-*************************************************************************
-
-1. ???
-

+ 0 - 2701
drivers/staging/ccree/cc_aead.c

@@ -1,2701 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/aead.h>
-#include <crypto/authenc.h>
-#include <crypto/des.h>
-#include <linux/rtnetlink.h>
-#include "cc_driver.h"
-#include "cc_buffer_mgr.h"
-#include "cc_aead.h"
-#include "cc_request_mgr.h"
-#include "cc_hash.h"
-#include "cc_sram_mgr.h"
-
-#define template_aead	template_u.aead
-
-#define MAX_AEAD_SETKEY_SEQ 12
-#define MAX_AEAD_PROCESS_SEQ 23
-
-#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
-#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
-
-#define AES_CCM_RFC4309_NONCE_SIZE 3
-#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
-
-/* Value of each ICV_CMP byte (of 8) in case of success */
-#define ICV_VERIF_OK 0x01
-
-struct cc_aead_handle {
-	cc_sram_addr_t sram_workspace_addr;
-	struct list_head aead_list;
-};
-
-struct cc_hmac_s {
-	u8 *padded_authkey;
-	u8 *ipad_opad; /* IPAD, OPAD*/
-	dma_addr_t padded_authkey_dma_addr;
-	dma_addr_t ipad_opad_dma_addr;
-};
-
-struct cc_xcbc_s {
-	u8 *xcbc_keys; /* K1,K2,K3 */
-	dma_addr_t xcbc_keys_dma_addr;
-};
-
-struct cc_aead_ctx {
-	struct cc_drvdata *drvdata;
-	u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
-	u8 *enckey;
-	dma_addr_t enckey_dma_addr;
-	union {
-		struct cc_hmac_s hmac;
-		struct cc_xcbc_s xcbc;
-	} auth_state;
-	unsigned int enc_keylen;
-	unsigned int auth_keylen;
-	unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */
-	enum drv_cipher_mode cipher_mode;
-	enum cc_flow_mode flow_mode;
-	enum drv_hash_mode auth_mode;
-};
-
-static inline bool valid_assoclen(struct aead_request *req)
-{
-	return ((req->assoclen == 16) || (req->assoclen == 20));
-}
-
-static void cc_aead_exit(struct crypto_aead *tfm)
-{
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-
-	dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
-		crypto_tfm_alg_name(&tfm->base));
-
-	/* Unmap enckey buffer */
-	if (ctx->enckey) {
-		dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
-				  ctx->enckey_dma_addr);
-		dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
-			&ctx->enckey_dma_addr);
-		ctx->enckey_dma_addr = 0;
-		ctx->enckey = NULL;
-	}
-
-	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
-		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
-
-		if (xcbc->xcbc_keys) {
-			dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
-					  xcbc->xcbc_keys,
-					  xcbc->xcbc_keys_dma_addr);
-		}
-		dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
-			&xcbc->xcbc_keys_dma_addr);
-		xcbc->xcbc_keys_dma_addr = 0;
-		xcbc->xcbc_keys = NULL;
-	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
-		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
-
-		if (hmac->ipad_opad) {
-			dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
-					  hmac->ipad_opad,
-					  hmac->ipad_opad_dma_addr);
-			dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
-				&hmac->ipad_opad_dma_addr);
-			hmac->ipad_opad_dma_addr = 0;
-			hmac->ipad_opad = NULL;
-		}
-		if (hmac->padded_authkey) {
-			dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
-					  hmac->padded_authkey,
-					  hmac->padded_authkey_dma_addr);
-			dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
-				&hmac->padded_authkey_dma_addr);
-			hmac->padded_authkey_dma_addr = 0;
-			hmac->padded_authkey = NULL;
-		}
-	}
-}
-
-static int cc_aead_init(struct crypto_aead *tfm)
-{
-	struct aead_alg *alg = crypto_aead_alg(tfm);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct cc_crypto_alg *cc_alg =
-			container_of(alg, struct cc_crypto_alg, aead_alg);
-	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
-
-	dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
-		crypto_tfm_alg_name(&tfm->base));
-
-	/* Initialize modes in instance */
-	ctx->cipher_mode = cc_alg->cipher_mode;
-	ctx->flow_mode = cc_alg->flow_mode;
-	ctx->auth_mode = cc_alg->auth_mode;
-	ctx->drvdata = cc_alg->drvdata;
-	crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
-
-	/* Allocate key buffer, cache line aligned */
-	ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
-					 &ctx->enckey_dma_addr, GFP_KERNEL);
-	if (!ctx->enckey) {
-		dev_err(dev, "Failed allocating key buffer\n");
-		goto init_failed;
-	}
-	dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
-		ctx->enckey);
-
-	/* Set default authlen value */
-
-	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
-		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
-		const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
-
-		/* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
-		/* (and temporary for user key - up to 256b) */
-		xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
-						     &xcbc->xcbc_keys_dma_addr,
-						     GFP_KERNEL);
-		if (!xcbc->xcbc_keys) {
-			dev_err(dev, "Failed allocating buffer for XCBC keys\n");
-			goto init_failed;
-		}
-	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
-		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
-		const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
-		dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
-
-		/* Allocate dma-coherent buffer for IPAD + OPAD */
-		hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
-						     &hmac->ipad_opad_dma_addr,
-						     GFP_KERNEL);
-
-		if (!hmac->ipad_opad) {
-			dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
-			goto init_failed;
-		}
-
-		dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
-			hmac->ipad_opad);
-
-		hmac->padded_authkey = dma_alloc_coherent(dev,
-							  MAX_HMAC_BLOCK_SIZE,
-							  pkey_dma,
-							  GFP_KERNEL);
-
-		if (!hmac->padded_authkey) {
-			dev_err(dev, "failed to allocate padded_authkey\n");
-			goto init_failed;
-		}
-	} else {
-		ctx->auth_state.hmac.ipad_opad = NULL;
-		ctx->auth_state.hmac.padded_authkey = NULL;
-	}
-
-	return 0;
-
-init_failed:
-	cc_aead_exit(tfm);
-	return -ENOMEM;
-}
-
-static void cc_aead_complete(struct device *dev, void *cc_req, int err)
-{
-	struct aead_request *areq = (struct aead_request *)cc_req;
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
-	struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-
-	cc_unmap_aead_request(dev, areq);
-
-	/* Restore ordinary iv pointer */
-	areq->iv = areq_ctx->backup_iv;
-
-	if (err)
-		goto done;
-
-	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
-		if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
-			   ctx->authsize) != 0) {
-			dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
-				ctx->authsize, ctx->cipher_mode);
-			/* In case of payload authentication failure, MUST NOT
-			 * revealed the decrypted message --> zero its memory.
-			 */
-			cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
-			err = -EBADMSG;
-		}
-	} else { /*ENCRYPT*/
-		if (areq_ctx->is_icv_fragmented) {
-			u32 skip = areq->cryptlen + areq_ctx->dst_offset;
-
-			cc_copy_sg_portion(dev, areq_ctx->mac_buf,
-					   areq_ctx->dst_sgl, skip,
-					   (skip + ctx->authsize),
-					   CC_SG_FROM_BUF);
-		}
-
-		/* If an IV was generated, copy it back to the user provided
-		 * buffer.
-		 */
-		if (areq_ctx->backup_giv) {
-			if (ctx->cipher_mode == DRV_CIPHER_CTR)
-				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
-				       CTR_RFC3686_NONCE_SIZE,
-				       CTR_RFC3686_IV_SIZE);
-			else if (ctx->cipher_mode == DRV_CIPHER_CCM)
-				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
-				       CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
-		}
-	}
-done:
-	aead_request_complete(areq, err);
-}
-
-static int xcbc_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
-{
-	/* Load the AES key */
-	hw_desc_init(&desc[0]);
-	/* We are using for the source/user key the same buffer
-	 * as for the output keys, * because after this key loading it
-	 * is not needed anymore
-	 */
-	set_din_type(&desc[0], DMA_DLLI,
-		     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
-		     NS_BIT);
-	set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
-	set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
-	set_key_size_aes(&desc[0], ctx->auth_keylen);
-	set_flow_mode(&desc[0], S_DIN_to_AES);
-	set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
-
-	hw_desc_init(&desc[1]);
-	set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
-	set_flow_mode(&desc[1], DIN_AES_DOUT);
-	set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
-		      AES_KEYSIZE_128, NS_BIT, 0);
-
-	hw_desc_init(&desc[2]);
-	set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
-	set_flow_mode(&desc[2], DIN_AES_DOUT);
-	set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
-					 + AES_KEYSIZE_128),
-			      AES_KEYSIZE_128, NS_BIT, 0);
-
-	hw_desc_init(&desc[3]);
-	set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
-	set_flow_mode(&desc[3], DIN_AES_DOUT);
-	set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
-					  + 2 * AES_KEYSIZE_128),
-			      AES_KEYSIZE_128, NS_BIT, 0);
-
-	return 4;
-}
-
-static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
-{
-	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
-	unsigned int digest_ofs = 0;
-	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
-			DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
-	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
-			CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
-	struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
-
-	int idx = 0;
-	int i;
-
-	/* calc derived HMAC key */
-	for (i = 0; i < 2; i++) {
-		/* Load hash initial state */
-		hw_desc_init(&desc[idx]);
-		set_cipher_mode(&desc[idx], hash_mode);
-		set_din_sram(&desc[idx],
-			     cc_larval_digest_addr(ctx->drvdata,
-						   ctx->auth_mode),
-			     digest_size);
-		set_flow_mode(&desc[idx], S_DIN_to_HASH);
-		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
-		idx++;
-
-		/* Load the hash current length*/
-		hw_desc_init(&desc[idx]);
-		set_cipher_mode(&desc[idx], hash_mode);
-		set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
-		set_flow_mode(&desc[idx], S_DIN_to_HASH);
-		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
-		idx++;
-
-		/* Prepare ipad key */
-		hw_desc_init(&desc[idx]);
-		set_xor_val(&desc[idx], hmac_pad_const[i]);
-		set_cipher_mode(&desc[idx], hash_mode);
-		set_flow_mode(&desc[idx], S_DIN_to_HASH);
-		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
-		idx++;
-
-		/* Perform HASH update */
-		hw_desc_init(&desc[idx]);
-		set_din_type(&desc[idx], DMA_DLLI,
-			     hmac->padded_authkey_dma_addr,
-			     SHA256_BLOCK_SIZE, NS_BIT);
-		set_cipher_mode(&desc[idx], hash_mode);
-		set_xor_active(&desc[idx]);
-		set_flow_mode(&desc[idx], DIN_HASH);
-		idx++;
-
-		/* Get the digset */
-		hw_desc_init(&desc[idx]);
-		set_cipher_mode(&desc[idx], hash_mode);
-		set_dout_dlli(&desc[idx],
-			      (hmac->ipad_opad_dma_addr + digest_ofs),
-			      digest_size, NS_BIT, 0);
-		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
-		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
-		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
-		idx++;
-
-		digest_ofs += digest_size;
-	}
-
-	return idx;
-}
-
-static int validate_keys_sizes(struct cc_aead_ctx *ctx)
-{
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-
-	dev_dbg(dev, "enc_keylen=%u  authkeylen=%u\n",
-		ctx->enc_keylen, ctx->auth_keylen);
-
-	switch (ctx->auth_mode) {
-	case DRV_HASH_SHA1:
-	case DRV_HASH_SHA256:
-		break;
-	case DRV_HASH_XCBC_MAC:
-		if (ctx->auth_keylen != AES_KEYSIZE_128 &&
-		    ctx->auth_keylen != AES_KEYSIZE_192 &&
-		    ctx->auth_keylen != AES_KEYSIZE_256)
-			return -ENOTSUPP;
-		break;
-	case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key) */
-		if (ctx->auth_keylen > 0)
-			return -EINVAL;
-		break;
-	default:
-		dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
-		return -EINVAL;
-	}
-	/* Check cipher key size */
-	if (ctx->flow_mode == S_DIN_to_DES) {
-		if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
-			dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
-				ctx->enc_keylen);
-			return -EINVAL;
-		}
-	} else { /* Default assumed to be AES ciphers */
-		if (ctx->enc_keylen != AES_KEYSIZE_128 &&
-		    ctx->enc_keylen != AES_KEYSIZE_192 &&
-		    ctx->enc_keylen != AES_KEYSIZE_256) {
-			dev_err(dev, "Invalid cipher(AES) key size: %u\n",
-				ctx->enc_keylen);
-			return -EINVAL;
-		}
-	}
-
-	return 0; /* All tests of keys sizes passed */
-}
-
-/* This function prepers the user key so it can pass to the hmac processing
- * (copy to intenral buffer or hash in case of key longer than block
- */
-static int
-cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
-		      unsigned int keylen)
-{
-	dma_addr_t key_dma_addr = 0;
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-	u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
-	struct cc_crypto_req cc_req = {};
-	unsigned int blocksize;
-	unsigned int digestsize;
-	unsigned int hashmode;
-	unsigned int idx = 0;
-	int rc = 0;
-	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
-	dma_addr_t padded_authkey_dma_addr =
-		ctx->auth_state.hmac.padded_authkey_dma_addr;
-
-	switch (ctx->auth_mode) { /* auth_key required and >0 */
-	case DRV_HASH_SHA1:
-		blocksize = SHA1_BLOCK_SIZE;
-		digestsize = SHA1_DIGEST_SIZE;
-		hashmode = DRV_HASH_HW_SHA1;
-		break;
-	case DRV_HASH_SHA256:
-	default:
-		blocksize = SHA256_BLOCK_SIZE;
-		digestsize = SHA256_DIGEST_SIZE;
-		hashmode = DRV_HASH_HW_SHA256;
-	}
-
-	if (keylen != 0) {
-		key_dma_addr = dma_map_single(dev, (void *)key, keylen,
-					      DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, key_dma_addr)) {
-			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
-				key, keylen);
-			return -ENOMEM;
-		}
-		if (keylen > blocksize) {
-			/* Load hash initial state */
-			hw_desc_init(&desc[idx]);
-			set_cipher_mode(&desc[idx], hashmode);
-			set_din_sram(&desc[idx], larval_addr, digestsize);
-			set_flow_mode(&desc[idx], S_DIN_to_HASH);
-			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
-			idx++;
-
-			/* Load the hash current length*/
-			hw_desc_init(&desc[idx]);
-			set_cipher_mode(&desc[idx], hashmode);
-			set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
-			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
-			set_flow_mode(&desc[idx], S_DIN_to_HASH);
-			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
-			idx++;
-
-			hw_desc_init(&desc[idx]);
-			set_din_type(&desc[idx], DMA_DLLI,
-				     key_dma_addr, keylen, NS_BIT);
-			set_flow_mode(&desc[idx], DIN_HASH);
-			idx++;
-
-			/* Get hashed key */
-			hw_desc_init(&desc[idx]);
-			set_cipher_mode(&desc[idx], hashmode);
-			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
-				      digestsize, NS_BIT, 0);
-			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
-			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
-			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
-			set_cipher_config0(&desc[idx],
-					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
-			idx++;
-
-			hw_desc_init(&desc[idx]);
-			set_din_const(&desc[idx], 0, (blocksize - digestsize));
-			set_flow_mode(&desc[idx], BYPASS);
-			set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
-				      digestsize), (blocksize - digestsize),
-				      NS_BIT, 0);
-			idx++;
-		} else {
-			hw_desc_init(&desc[idx]);
-			set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
-				     keylen, NS_BIT);
-			set_flow_mode(&desc[idx], BYPASS);
-			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
-				      keylen, NS_BIT, 0);
-			idx++;
-
-			if ((blocksize - keylen) != 0) {
-				hw_desc_init(&desc[idx]);
-				set_din_const(&desc[idx], 0,
-					      (blocksize - keylen));
-				set_flow_mode(&desc[idx], BYPASS);
-				set_dout_dlli(&desc[idx],
-					      (padded_authkey_dma_addr +
-					       keylen),
-					      (blocksize - keylen), NS_BIT, 0);
-				idx++;
-			}
-		}
-	} else {
-		hw_desc_init(&desc[idx]);
-		set_din_const(&desc[idx], 0, (blocksize - keylen));
-		set_flow_mode(&desc[idx], BYPASS);
-		set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
-			      blocksize, NS_BIT, 0);
-		idx++;
-	}
-
-	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
-	if (rc)
-		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
-
-	if (key_dma_addr)
-		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
-
-	return rc;
-}
-
-static int
-cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
-{
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct rtattr *rta = (struct rtattr *)key;
-	struct cc_crypto_req cc_req = {};
-	struct crypto_authenc_key_param *param;
-	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
-	int seq_len = 0, rc = -EINVAL;
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-
-	dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
-		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
-
-	/* STAT_PHASE_0: Init and sanity checks */
-
-	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
-		if (!RTA_OK(rta, keylen))
-			goto badkey;
-		if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-			goto badkey;
-		if (RTA_PAYLOAD(rta) < sizeof(*param))
-			goto badkey;
-		param = RTA_DATA(rta);
-		ctx->enc_keylen = be32_to_cpu(param->enckeylen);
-		key += RTA_ALIGN(rta->rta_len);
-		keylen -= RTA_ALIGN(rta->rta_len);
-		if (keylen < ctx->enc_keylen)
-			goto badkey;
-		ctx->auth_keylen = keylen - ctx->enc_keylen;
-
-		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
-			/* the nonce is stored in bytes at end of key */
-			if (ctx->enc_keylen <
-			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
-				goto badkey;
-			/* Copy nonce from last 4 bytes in CTR key to
-			 *  first 4 bytes in CTR IV
-			 */
-			memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
-			       ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
-			       CTR_RFC3686_NONCE_SIZE);
-			/* Set CTR key size */
-			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
-		}
-	} else { /* non-authenc - has just one key */
-		ctx->enc_keylen = keylen;
-		ctx->auth_keylen = 0;
-	}
-
-	rc = validate_keys_sizes(ctx);
-	if (rc)
-		goto badkey;
-
-	/* STAT_PHASE_1: Copy key to ctx */
-
-	/* Get key material */
-	memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
-	if (ctx->enc_keylen == 24)
-		memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
-	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
-		memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
-	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
-		rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
-		if (rc)
-			goto badkey;
-	}
-
-	/* STAT_PHASE_2: Create sequence */
-
-	switch (ctx->auth_mode) {
-	case DRV_HASH_SHA1:
-	case DRV_HASH_SHA256:
-		seq_len = hmac_setkey(desc, ctx);
-		break;
-	case DRV_HASH_XCBC_MAC:
-		seq_len = xcbc_setkey(desc, ctx);
-		break;
-	case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
-		break; /* No auth. key setup */
-	default:
-		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
-		rc = -ENOTSUPP;
-		goto badkey;
-	}
-
-	/* STAT_PHASE_3: Submit sequence to HW */
-
-	if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
-		rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
-		if (rc) {
-			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
-			goto setkey_error;
-		}
-	}
-
-	/* Update STAT_PHASE_3 */
-	return rc;
-
-badkey:
-	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
-setkey_error:
-	return rc;
-}
-
-static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
-				 unsigned int keylen)
-{
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-
-	if (keylen < 3)
-		return -EINVAL;
-
-	keylen -= 3;
-	memcpy(ctx->ctr_nonce, key + keylen, 3);
-
-	return cc_aead_setkey(tfm, key, keylen);
-}
-
-static int cc_aead_setauthsize(struct crypto_aead *authenc,
-			       unsigned int authsize)
-{
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-
-	/* Unsupported auth. sizes */
-	if (authsize == 0 ||
-	    authsize > crypto_aead_maxauthsize(authenc)) {
-		return -ENOTSUPP;
-	}
-
-	ctx->authsize = authsize;
-	dev_dbg(dev, "authlen=%d\n", ctx->authsize);
-
-	return 0;
-}
-
-static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
-				      unsigned int authsize)
-{
-	switch (authsize) {
-	case 8:
-	case 12:
-	case 16:
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return cc_aead_setauthsize(authenc, authsize);
-}
-
-static int cc_ccm_setauthsize(struct crypto_aead *authenc,
-			      unsigned int authsize)
-{
-	switch (authsize) {
-	case 4:
-	case 6:
-	case 8:
-	case 10:
-	case 12:
-	case 14:
-	case 16:
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return cc_aead_setauthsize(authenc, authsize);
-}
-
-static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
-			      struct cc_hw_desc desc[], unsigned int *seq_size)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
-	enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
-	unsigned int idx = *seq_size;
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-
-	switch (assoc_dma_type) {
-	case CC_DMA_BUF_DLLI:
-		dev_dbg(dev, "ASSOC buffer type DLLI\n");
-		hw_desc_init(&desc[idx]);
-		set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
-			     areq->assoclen, NS_BIT);
-		set_flow_mode(&desc[idx], flow_mode);
-		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
-		    areq_ctx->cryptlen > 0)
-			set_din_not_last_indication(&desc[idx]);
-		break;
-	case CC_DMA_BUF_MLLI:
-		dev_dbg(dev, "ASSOC buffer type MLLI\n");
-		hw_desc_init(&desc[idx]);
-		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
-			     areq_ctx->assoc.mlli_nents, NS_BIT);
-		set_flow_mode(&desc[idx], flow_mode);
-		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
-		    areq_ctx->cryptlen > 0)
-			set_din_not_last_indication(&desc[idx]);
-		break;
-	case CC_DMA_BUF_NULL:
-	default:
-		dev_err(dev, "Invalid ASSOC buffer type\n");
-	}
-
-	*seq_size = (++idx);
-}
-
-static void cc_proc_authen_desc(struct aead_request *areq,
-				unsigned int flow_mode,
-				struct cc_hw_desc desc[],
-				unsigned int *seq_size, int direct)
-{
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
-	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
-	unsigned int idx = *seq_size;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-
-	switch (data_dma_type) {
-	case CC_DMA_BUF_DLLI:
-	{
-		struct scatterlist *cipher =
-			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
-			areq_ctx->dst_sgl : areq_ctx->src_sgl;
-
-		unsigned int offset =
-			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
-			areq_ctx->dst_offset : areq_ctx->src_offset;
-		dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
-		hw_desc_init(&desc[idx]);
-		set_din_type(&desc[idx], DMA_DLLI,
-			     (sg_dma_address(cipher) + offset),
-			     areq_ctx->cryptlen, NS_BIT);
-		set_flow_mode(&desc[idx], flow_mode);
-		break;
-	}
-	case CC_DMA_BUF_MLLI:
-	{
-		/* DOUBLE-PASS flow (as default)
-		 * assoc. + iv + data -compact in one table
-		 * if assoclen is ZERO only IV perform
-		 */
-		cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
-		u32 mlli_nents = areq_ctx->assoc.mlli_nents;
-
-		if (areq_ctx->is_single_pass) {
-			if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
-				mlli_addr = areq_ctx->dst.sram_addr;
-				mlli_nents = areq_ctx->dst.mlli_nents;
-			} else {
-				mlli_addr = areq_ctx->src.sram_addr;
-				mlli_nents = areq_ctx->src.mlli_nents;
-			}
-		}
-
-		dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
-		hw_desc_init(&desc[idx]);
-		set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
-			     NS_BIT);
-		set_flow_mode(&desc[idx], flow_mode);
-		break;
-	}
-	case CC_DMA_BUF_NULL:
-	default:
-		dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
-	}
-
-	*seq_size = (++idx);
-}
-
-static void cc_proc_cipher_desc(struct aead_request *areq,
-				unsigned int flow_mode,
-				struct cc_hw_desc desc[],
-				unsigned int *seq_size)
-{
-	unsigned int idx = *seq_size;
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
-	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-
-	if (areq_ctx->cryptlen == 0)
-		return; /*null processing*/
-
-	switch (data_dma_type) {
-	case CC_DMA_BUF_DLLI:
-		dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
-		hw_desc_init(&desc[idx]);
-		set_din_type(&desc[idx], DMA_DLLI,
-			     (sg_dma_address(areq_ctx->src_sgl) +
-			      areq_ctx->src_offset), areq_ctx->cryptlen,
-			      NS_BIT);
-		set_dout_dlli(&desc[idx],
-			      (sg_dma_address(areq_ctx->dst_sgl) +
-			       areq_ctx->dst_offset),
-			      areq_ctx->cryptlen, NS_BIT, 0);
-		set_flow_mode(&desc[idx], flow_mode);
-		break;
-	case CC_DMA_BUF_MLLI:
-		dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
-		hw_desc_init(&desc[idx]);
-		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
-			     areq_ctx->src.mlli_nents, NS_BIT);
-		set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
-			      areq_ctx->dst.mlli_nents, NS_BIT, 0);
-		set_flow_mode(&desc[idx], flow_mode);
-		break;
-	case CC_DMA_BUF_NULL:
-	default:
-		dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
-	}
-
-	*seq_size = (++idx);
-}
-
-static void cc_proc_digest_desc(struct aead_request *req,
-				struct cc_hw_desc desc[],
-				unsigned int *seq_size)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-	unsigned int idx = *seq_size;
-	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
-				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
-	int direct = req_ctx->gen_ctx.op_type;
-
-	/* Get final ICV result */
-	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
-		hw_desc_init(&desc[idx]);
-		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
-		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
-		set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
-			      NS_BIT, 1);
-		set_queue_last_ind(&desc[idx]);
-		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
-			set_aes_not_hash_mode(&desc[idx]);
-			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
-		} else {
-			set_cipher_config0(&desc[idx],
-					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
-			set_cipher_mode(&desc[idx], hash_mode);
-		}
-	} else { /*Decrypt*/
-		/* Get ICV out from hardware */
-		hw_desc_init(&desc[idx]);
-		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
-		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
-		set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
-			      ctx->authsize, NS_BIT, 1);
-		set_queue_last_ind(&desc[idx]);
-		set_cipher_config0(&desc[idx],
-				   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
-		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
-		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
-			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
-			set_aes_not_hash_mode(&desc[idx]);
-		} else {
-			set_cipher_mode(&desc[idx], hash_mode);
-		}
-	}
-
-	*seq_size = (++idx);
-}
-
-static void cc_set_cipher_desc(struct aead_request *req,
-			       struct cc_hw_desc desc[],
-			       unsigned int *seq_size)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-	unsigned int hw_iv_size = req_ctx->hw_iv_size;
-	unsigned int idx = *seq_size;
-	int direct = req_ctx->gen_ctx.op_type;
-
-	/* Setup cipher state */
-	hw_desc_init(&desc[idx]);
-	set_cipher_config0(&desc[idx], direct);
-	set_flow_mode(&desc[idx], ctx->flow_mode);
-	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
-		     hw_iv_size, NS_BIT);
-	if (ctx->cipher_mode == DRV_CIPHER_CTR)
-		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
-	else
-		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
-	set_cipher_mode(&desc[idx], ctx->cipher_mode);
-	idx++;
-
-	/* Setup enc. key */
-	hw_desc_init(&desc[idx]);
-	set_cipher_config0(&desc[idx], direct);
-	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
-	set_flow_mode(&desc[idx], ctx->flow_mode);
-	if (ctx->flow_mode == S_DIN_to_AES) {
-		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
-			     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
-			      ctx->enc_keylen), NS_BIT);
-		set_key_size_aes(&desc[idx], ctx->enc_keylen);
-	} else {
-		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
-			     ctx->enc_keylen, NS_BIT);
-		set_key_size_des(&desc[idx], ctx->enc_keylen);
-	}
-	set_cipher_mode(&desc[idx], ctx->cipher_mode);
-	idx++;
-
-	*seq_size = idx;
-}
-
-static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
-			   unsigned int *seq_size, unsigned int data_flow_mode)
-{
-	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-	int direct = req_ctx->gen_ctx.op_type;
-	unsigned int idx = *seq_size;
-
-	if (req_ctx->cryptlen == 0)
-		return; /*null processing*/
-
-	cc_set_cipher_desc(req, desc, &idx);
-	cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
-	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
-		/* We must wait for DMA to write all cipher */
-		hw_desc_init(&desc[idx]);
-		set_din_no_dma(&desc[idx], 0, 0xfffff0);
-		set_dout_no_dma(&desc[idx], 0, 0, 1);
-		idx++;
-	}
-
-	*seq_size = idx;
-}
-
-static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
-			     unsigned int *seq_size)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
-				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
-	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
-				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
-	unsigned int idx = *seq_size;
-
-	/* Loading hash ipad xor key state */
-	hw_desc_init(&desc[idx]);
-	set_cipher_mode(&desc[idx], hash_mode);
-	set_din_type(&desc[idx], DMA_DLLI,
-		     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
-		     NS_BIT);
-	set_flow_mode(&desc[idx], S_DIN_to_HASH);
-	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
-	idx++;
-
-	/* Load init. digest len (64 bytes) */
-	hw_desc_init(&desc[idx]);
-	set_cipher_mode(&desc[idx], hash_mode);
-	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
-		     HASH_LEN_SIZE);
-	set_flow_mode(&desc[idx], S_DIN_to_HASH);
-	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
-	idx++;
-
-	*seq_size = idx;
-}
-
-static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
-			     unsigned int *seq_size)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	unsigned int idx = *seq_size;
-
-	/* Loading MAC state */
-	hw_desc_init(&desc[idx]);
-	set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
-	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
-	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
-	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
-	set_flow_mode(&desc[idx], S_DIN_to_HASH);
-	set_aes_not_hash_mode(&desc[idx]);
-	idx++;
-
-	/* Setup XCBC MAC K1 */
-	hw_desc_init(&desc[idx]);
-	set_din_type(&desc[idx], DMA_DLLI,
-		     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
-		     AES_KEYSIZE_128, NS_BIT);
-	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
-	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
-	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
-	set_flow_mode(&desc[idx], S_DIN_to_HASH);
-	set_aes_not_hash_mode(&desc[idx]);
-	idx++;
-
-	/* Setup XCBC MAC K2 */
-	hw_desc_init(&desc[idx]);
-	set_din_type(&desc[idx], DMA_DLLI,
-		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
-		      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
-	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
-	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
-	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
-	set_flow_mode(&desc[idx], S_DIN_to_HASH);
-	set_aes_not_hash_mode(&desc[idx]);
-	idx++;
-
-	/* Setup XCBC MAC K3 */
-	hw_desc_init(&desc[idx]);
-	set_din_type(&desc[idx], DMA_DLLI,
-		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
-		      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
-	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
-	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
-	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
-	set_flow_mode(&desc[idx], S_DIN_to_HASH);
-	set_aes_not_hash_mode(&desc[idx]);
-	idx++;
-
-	*seq_size = idx;
-}
-
-static void cc_proc_header_desc(struct aead_request *req,
-				struct cc_hw_desc desc[],
-				unsigned int *seq_size)
-{
-	unsigned int idx = *seq_size;
-	/* Hash associated data */
-	if (req->assoclen > 0)
-		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
-
-	/* Hash IV */
-	*seq_size = idx;
-}
-
-static void cc_proc_scheme_desc(struct aead_request *req,
-				struct cc_hw_desc desc[],
-				unsigned int *seq_size)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
-	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
-				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
-	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
-				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
-	unsigned int idx = *seq_size;
-
-	hw_desc_init(&desc[idx]);
-	set_cipher_mode(&desc[idx], hash_mode);
-	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
-		      HASH_LEN_SIZE);
-	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
-	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
-	set_cipher_do(&desc[idx], DO_PAD);
-	idx++;
-
-	/* Get final ICV result */
-	hw_desc_init(&desc[idx]);
-	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
-		      digest_size);
-	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
-	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
-	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
-	set_cipher_mode(&desc[idx], hash_mode);
-	idx++;
-
-	/* Loading hash opad xor key state */
-	hw_desc_init(&desc[idx]);
-	set_cipher_mode(&desc[idx], hash_mode);
-	set_din_type(&desc[idx], DMA_DLLI,
-		     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
-		     digest_size, NS_BIT);
-	set_flow_mode(&desc[idx], S_DIN_to_HASH);
-	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
-	idx++;
-
-	/* Load init. digest len (64 bytes) */
-	hw_desc_init(&desc[idx]);
-	set_cipher_mode(&desc[idx], hash_mode);
-	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
-		     HASH_LEN_SIZE);
-	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
-	set_flow_mode(&desc[idx], S_DIN_to_HASH);
-	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
-	idx++;
-
-	/* Perform HASH update */
-	hw_desc_init(&desc[idx]);
-	set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
-		     digest_size);
-	set_flow_mode(&desc[idx], DIN_HASH);
-	idx++;
-
-	*seq_size = idx;
-}
-
-static void cc_mlli_to_sram(struct aead_request *req,
-			    struct cc_hw_desc desc[], unsigned int *seq_size)
-{
-	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-
-	if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
-	    req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
-	    !req_ctx->is_single_pass) {
-		dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
-			(unsigned int)ctx->drvdata->mlli_sram_addr,
-			req_ctx->mlli_params.mlli_len);
-		/* Copy MLLI table host-to-sram */
-		hw_desc_init(&desc[*seq_size]);
-		set_din_type(&desc[*seq_size], DMA_DLLI,
-			     req_ctx->mlli_params.mlli_dma_addr,
-			     req_ctx->mlli_params.mlli_len, NS_BIT);
-		set_dout_sram(&desc[*seq_size],
-			      ctx->drvdata->mlli_sram_addr,
-			      req_ctx->mlli_params.mlli_len);
-		set_flow_mode(&desc[*seq_size], BYPASS);
-		(*seq_size)++;
-	}
-}
-
-static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
-					  enum cc_flow_mode setup_flow_mode,
-					  bool is_single_pass)
-{
-	enum cc_flow_mode data_flow_mode;
-
-	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
-		if (setup_flow_mode == S_DIN_to_AES)
-			data_flow_mode = is_single_pass ?
-				AES_to_HASH_and_DOUT : DIN_AES_DOUT;
-		else
-			data_flow_mode = is_single_pass ?
-				DES_to_HASH_and_DOUT : DIN_DES_DOUT;
-	} else { /* Decrypt */
-		if (setup_flow_mode == S_DIN_to_AES)
-			data_flow_mode = is_single_pass ?
-				AES_and_HASH : DIN_AES_DOUT;
-		else
-			data_flow_mode = is_single_pass ?
-				DES_and_HASH : DIN_DES_DOUT;
-	}
-
-	return data_flow_mode;
-}
-
-static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
-			    unsigned int *seq_size)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-	int direct = req_ctx->gen_ctx.op_type;
-	unsigned int data_flow_mode =
-		cc_get_data_flow(direct, ctx->flow_mode,
-				 req_ctx->is_single_pass);
-
-	if (req_ctx->is_single_pass) {
-		/**
-		 * Single-pass flow
-		 */
-		cc_set_hmac_desc(req, desc, seq_size);
-		cc_set_cipher_desc(req, desc, seq_size);
-		cc_proc_header_desc(req, desc, seq_size);
-		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
-		cc_proc_scheme_desc(req, desc, seq_size);
-		cc_proc_digest_desc(req, desc, seq_size);
-		return;
-	}
-
-	/**
-	 * Double-pass flow
-	 * Fallback for unsupported single-pass modes,
-	 * i.e. using assoc. data of non-word-multiple
-	 */
-	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
-		/* encrypt first.. */
-		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
-		/* authenc after..*/
-		cc_set_hmac_desc(req, desc, seq_size);
-		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
-		cc_proc_scheme_desc(req, desc, seq_size);
-		cc_proc_digest_desc(req, desc, seq_size);
-
-	} else { /*DECRYPT*/
-		/* authenc first..*/
-		cc_set_hmac_desc(req, desc, seq_size);
-		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
-		cc_proc_scheme_desc(req, desc, seq_size);
-		/* decrypt after.. */
-		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
-		/* read the digest result with setting the completion bit
-		 * must be after the cipher operation
-		 */
-		cc_proc_digest_desc(req, desc, seq_size);
-	}
-}
-
-static void
-cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
-		unsigned int *seq_size)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-	int direct = req_ctx->gen_ctx.op_type;
-	unsigned int data_flow_mode =
-		cc_get_data_flow(direct, ctx->flow_mode,
-				 req_ctx->is_single_pass);
-
-	if (req_ctx->is_single_pass) {
-		/**
-		 * Single-pass flow
-		 */
-		cc_set_xcbc_desc(req, desc, seq_size);
-		cc_set_cipher_desc(req, desc, seq_size);
-		cc_proc_header_desc(req, desc, seq_size);
-		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
-		cc_proc_digest_desc(req, desc, seq_size);
-		return;
-	}
-
-	/**
-	 * Double-pass flow
-	 * Fallback for unsupported single-pass modes,
-	 * i.e. using assoc. data of non-word-multiple
-	 */
-	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
-		/* encrypt first.. */
-		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
-		/* authenc after.. */
-		cc_set_xcbc_desc(req, desc, seq_size);
-		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
-		cc_proc_digest_desc(req, desc, seq_size);
-	} else { /*DECRYPT*/
-		/* authenc first.. */
-		cc_set_xcbc_desc(req, desc, seq_size);
-		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
-		/* decrypt after..*/
-		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
-		/* read the digest result with setting the completion bit
-		 * must be after the cipher operation
-		 */
-		cc_proc_digest_desc(req, desc, seq_size);
-	}
-}
-
-static int validate_data_size(struct cc_aead_ctx *ctx,
-			      enum drv_crypto_direction direct,
-			      struct aead_request *req)
-{
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-	unsigned int assoclen = req->assoclen;
-	unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
-			(req->cryptlen - ctx->authsize) : req->cryptlen;
-
-	if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
-	    req->cryptlen < ctx->authsize)
-		goto data_size_err;
-
-	areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
-
-	switch (ctx->flow_mode) {
-	case S_DIN_to_AES:
-		if (ctx->cipher_mode == DRV_CIPHER_CBC &&
-		    !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
-			goto data_size_err;
-		if (ctx->cipher_mode == DRV_CIPHER_CCM)
-			break;
-		if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
-			if (areq_ctx->plaintext_authenticate_only)
-				areq_ctx->is_single_pass = false;
-			break;
-		}
-
-		if (!IS_ALIGNED(assoclen, sizeof(u32)))
-			areq_ctx->is_single_pass = false;
-
-		if (ctx->cipher_mode == DRV_CIPHER_CTR &&
-		    !IS_ALIGNED(cipherlen, sizeof(u32)))
-			areq_ctx->is_single_pass = false;
-
-		break;
-	case S_DIN_to_DES:
-		if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
-			goto data_size_err;
-		if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
-			areq_ctx->is_single_pass = false;
-		break;
-	default:
-		dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
-		goto data_size_err;
-	}
-
-	return 0;
-
-data_size_err:
-	return -EINVAL;
-}
-
-static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
-{
-	unsigned int len = 0;
-
-	if (header_size == 0)
-		return 0;
-
-	if (header_size < ((1UL << 16) - (1UL << 8))) {
-		len = 2;
-
-		pa0_buff[0] = (header_size >> 8) & 0xFF;
-		pa0_buff[1] = header_size & 0xFF;
-	} else {
-		len = 6;
-
-		pa0_buff[0] = 0xFF;
-		pa0_buff[1] = 0xFE;
-		pa0_buff[2] = (header_size >> 24) & 0xFF;
-		pa0_buff[3] = (header_size >> 16) & 0xFF;
-		pa0_buff[4] = (header_size >> 8) & 0xFF;
-		pa0_buff[5] = header_size & 0xFF;
-	}
-
-	return len;
-}
-
-static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
-{
-	__be32 data;
-
-	memset(block, 0, csize);
-	block += csize;
-
-	if (csize >= 4)
-		csize = 4;
-	else if (msglen > (1 << (8 * csize)))
-		return -EOVERFLOW;
-
-	data = cpu_to_be32(msglen);
-	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
-
-	return 0;
-}
-
-static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
-		  unsigned int *seq_size)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-	unsigned int idx = *seq_size;
-	unsigned int cipher_flow_mode;
-	dma_addr_t mac_result;
-
-	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
-		cipher_flow_mode = AES_to_HASH_and_DOUT;
-		mac_result = req_ctx->mac_buf_dma_addr;
-	} else { /* Encrypt */
-		cipher_flow_mode = AES_and_HASH;
-		mac_result = req_ctx->icv_dma_addr;
-	}
-
-	/* load key */
-	hw_desc_init(&desc[idx]);
-	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
-	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
-		     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
-		      ctx->enc_keylen), NS_BIT);
-	set_key_size_aes(&desc[idx], ctx->enc_keylen);
-	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
-	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	set_flow_mode(&desc[idx], S_DIN_to_AES);
-	idx++;
-
-	/* load ctr state */
-	hw_desc_init(&desc[idx]);
-	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
-	set_key_size_aes(&desc[idx], ctx->enc_keylen);
-	set_din_type(&desc[idx], DMA_DLLI,
-		     req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
-	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
-	set_flow_mode(&desc[idx], S_DIN_to_AES);
-	idx++;
-
-	/* load MAC key */
-	hw_desc_init(&desc[idx]);
-	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
-	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
-		     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
-		      ctx->enc_keylen), NS_BIT);
-	set_key_size_aes(&desc[idx], ctx->enc_keylen);
-	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
-	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	set_flow_mode(&desc[idx], S_DIN_to_HASH);
-	set_aes_not_hash_mode(&desc[idx]);
-	idx++;
-
-	/* load MAC state */
-	hw_desc_init(&desc[idx]);
-	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
-	set_key_size_aes(&desc[idx], ctx->enc_keylen);
-	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
-		     AES_BLOCK_SIZE, NS_BIT);
-	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
-	set_flow_mode(&desc[idx], S_DIN_to_HASH);
-	set_aes_not_hash_mode(&desc[idx]);
-	idx++;
-
-	/* process assoc data */
-	if (req->assoclen > 0) {
-		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
-	} else {
-		hw_desc_init(&desc[idx]);
-		set_din_type(&desc[idx], DMA_DLLI,
-			     sg_dma_address(&req_ctx->ccm_adata_sg),
-			     AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
-		set_flow_mode(&desc[idx], DIN_HASH);
-		idx++;
-	}
-
-	/* process the cipher */
-	if (req_ctx->cryptlen)
-		cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
-
-	/* Read temporal MAC */
-	hw_desc_init(&desc[idx]);
-	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
-	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
-		      NS_BIT, 0);
-	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
-	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
-	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
-	set_aes_not_hash_mode(&desc[idx]);
-	idx++;
-
-	/* load AES-CTR state (for last MAC calculation)*/
-	hw_desc_init(&desc[idx]);
-	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
-	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
-	set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
-		     AES_BLOCK_SIZE, NS_BIT);
-	set_key_size_aes(&desc[idx], ctx->enc_keylen);
-	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
-	set_flow_mode(&desc[idx], S_DIN_to_AES);
-	idx++;
-
-	hw_desc_init(&desc[idx]);
-	set_din_no_dma(&desc[idx], 0, 0xfffff0);
-	set_dout_no_dma(&desc[idx], 0, 0, 1);
-	idx++;
-
-	/* encrypt the "T" value and store MAC in mac_state */
-	hw_desc_init(&desc[idx]);
-	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
-		     ctx->authsize, NS_BIT);
-	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
-	set_queue_last_ind(&desc[idx]);
-	set_flow_mode(&desc[idx], DIN_AES_DOUT);
-	idx++;
-
-	*seq_size = idx;
-	return 0;
-}
-
-static int config_ccm_adata(struct aead_request *req)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-	//unsigned int size_of_a = 0, rem_a_size = 0;
-	unsigned int lp = req->iv[0];
-	/* Note: The code assume that req->iv[0] already contains the value
-	 * of L' of RFC3610
-	 */
-	unsigned int l = lp + 1;  /* This is L' of RFC 3610. */
-	unsigned int m = ctx->authsize;  /* This is M' of RFC 3610. */
-	u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
-	u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
-	u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
-	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
-				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
-				req->cryptlen :
-				(req->cryptlen - ctx->authsize);
-	int rc;
-
-	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
-	memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
-
-	/* taken from crypto/ccm.c */
-	/* 2 <= L <= 8, so 1 <= L' <= 7. */
-	if (l < 2 || l > 8) {
-		dev_err(dev, "illegal iv value %X\n", req->iv[0]);
-		return -EINVAL;
-	}
-	memcpy(b0, req->iv, AES_BLOCK_SIZE);
-
-	/* format control info per RFC 3610 and
-	 * NIST Special Publication 800-38C
-	 */
-	*b0 |= (8 * ((m - 2) / 2));
-	if (req->assoclen > 0)
-		*b0 |= 64;  /* Enable bit 6 if Adata exists. */
-
-	rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write L'. */
-	if (rc) {
-		dev_err(dev, "message len overflow detected");
-		return rc;
-	}
-	 /* END of "taken from crypto/ccm.c" */
-
-	/* l(a) - size of associated data. */
-	req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
-
-	memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
-	req->iv[15] = 1;
-
-	memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
-	ctr_count_0[15] = 0;
-
-	return 0;
-}
-
-static void cc_proc_rfc4309_ccm(struct aead_request *req)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-
-	/* L' */
-	memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
-	/* For RFC 4309, always use 4 bytes for message length
-	 * (at most 2^32-1 bytes).
-	 */
-	areq_ctx->ctr_iv[0] = 3;
-
-	/* In RFC 4309 there is an 11-bytes nonce+IV part,
-	 * that we build here.
-	 */
-	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
-	       CCM_BLOCK_NONCE_SIZE);
-	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
-	       CCM_BLOCK_IV_SIZE);
-	req->iv = areq_ctx->ctr_iv;
-	req->assoclen -= CCM_BLOCK_IV_SIZE;
-}
-
-static void cc_set_ghash_desc(struct aead_request *req,
-			      struct cc_hw_desc desc[], unsigned int *seq_size)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-	unsigned int idx = *seq_size;
-
-	/* load key to AES*/
-	hw_desc_init(&desc[idx]);
-	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
-	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
-	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
-		     ctx->enc_keylen, NS_BIT);
-	set_key_size_aes(&desc[idx], ctx->enc_keylen);
-	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
-	set_flow_mode(&desc[idx], S_DIN_to_AES);
-	idx++;
-
-	/* process one zero block to generate hkey */
-	hw_desc_init(&desc[idx]);
-	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
-	set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
-		      NS_BIT, 0);
-	set_flow_mode(&desc[idx], DIN_AES_DOUT);
-	idx++;
-
-	/* Memory Barrier */
-	hw_desc_init(&desc[idx]);
-	set_din_no_dma(&desc[idx], 0, 0xfffff0);
-	set_dout_no_dma(&desc[idx], 0, 0, 1);
-	idx++;
-
-	/* Load GHASH subkey */
-	hw_desc_init(&desc[idx]);
-	set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
-		     AES_BLOCK_SIZE, NS_BIT);
-	set_dout_no_dma(&desc[idx], 0, 0, 1);
-	set_flow_mode(&desc[idx], S_DIN_to_HASH);
-	set_aes_not_hash_mode(&desc[idx]);
-	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
-	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
-	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
-	idx++;
-
-	/* Configure Hash Engine to work with GHASH.
-	 * Since it was not possible to extend HASH submodes to add GHASH,
-	 * The following command is necessary in order to
-	 * select GHASH (according to HW designers)
-	 */
-	hw_desc_init(&desc[idx]);
-	set_din_no_dma(&desc[idx], 0, 0xfffff0);
-	set_dout_no_dma(&desc[idx], 0, 0, 1);
-	set_flow_mode(&desc[idx], S_DIN_to_HASH);
-	set_aes_not_hash_mode(&desc[idx]);
-	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
-	set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
-	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
-	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
-	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
-	idx++;
-
-	/* Load GHASH initial STATE (which is 0). (for any hash there is an
-	 * initial state)
-	 */
-	hw_desc_init(&desc[idx]);
-	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
-	set_dout_no_dma(&desc[idx], 0, 0, 1);
-	set_flow_mode(&desc[idx], S_DIN_to_HASH);
-	set_aes_not_hash_mode(&desc[idx]);
-	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
-	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
-	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
-	idx++;
-
-	*seq_size = idx;
-}
-
-static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
-			     unsigned int *seq_size)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-	unsigned int idx = *seq_size;
-
-	/* load key to AES*/
-	hw_desc_init(&desc[idx]);
-	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
-	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
-	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
-		     ctx->enc_keylen, NS_BIT);
-	set_key_size_aes(&desc[idx], ctx->enc_keylen);
-	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
-	set_flow_mode(&desc[idx], S_DIN_to_AES);
-	idx++;
-
-	if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
-		/* load AES/CTR initial CTR value inc by 2*/
-		hw_desc_init(&desc[idx]);
-		set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
-		set_key_size_aes(&desc[idx], ctx->enc_keylen);
-		set_din_type(&desc[idx], DMA_DLLI,
-			     req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
-			     NS_BIT);
-		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
-		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
-		set_flow_mode(&desc[idx], S_DIN_to_AES);
-		idx++;
-	}
-
-	*seq_size = idx;
-}
-
-static void cc_proc_gcm_result(struct aead_request *req,
-			       struct cc_hw_desc desc[],
-			       unsigned int *seq_size)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-	dma_addr_t mac_result;
-	unsigned int idx = *seq_size;
-
-	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
-		mac_result = req_ctx->mac_buf_dma_addr;
-	} else { /* Encrypt */
-		mac_result = req_ctx->icv_dma_addr;
-	}
-
-	/* process(ghash) gcm_block_len */
-	hw_desc_init(&desc[idx]);
-	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
-		     AES_BLOCK_SIZE, NS_BIT);
-	set_flow_mode(&desc[idx], DIN_HASH);
-	idx++;
-
-	/* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
-	hw_desc_init(&desc[idx]);
-	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
-	set_din_no_dma(&desc[idx], 0, 0xfffff0);
-	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
-		      NS_BIT, 0);
-	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
-	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
-	set_aes_not_hash_mode(&desc[idx]);
-
-	idx++;
-
-	/* load AES/CTR initial CTR value inc by 1*/
-	hw_desc_init(&desc[idx]);
-	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
-	set_key_size_aes(&desc[idx], ctx->enc_keylen);
-	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
-		     AES_BLOCK_SIZE, NS_BIT);
-	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
-	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
-	set_flow_mode(&desc[idx], S_DIN_to_AES);
-	idx++;
-
-	/* Memory Barrier */
-	hw_desc_init(&desc[idx]);
-	set_din_no_dma(&desc[idx], 0, 0xfffff0);
-	set_dout_no_dma(&desc[idx], 0, 0, 1);
-	idx++;
-
-	/* process GCTR on stored GHASH and store MAC in mac_state*/
-	hw_desc_init(&desc[idx]);
-	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
-	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
-		     AES_BLOCK_SIZE, NS_BIT);
-	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
-	set_queue_last_ind(&desc[idx]);
-	set_flow_mode(&desc[idx], DIN_AES_DOUT);
-	idx++;
-
-	*seq_size = idx;
-}
-
-static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
-		  unsigned int *seq_size)
-{
-	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-	unsigned int cipher_flow_mode;
-
-	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
-		cipher_flow_mode = AES_and_HASH;
-	} else { /* Encrypt */
-		cipher_flow_mode = AES_to_HASH_and_DOUT;
-	}
-
-	//in RFC4543 no data to encrypt. just copy data from src to dest.
-	if (req_ctx->plaintext_authenticate_only) {
-		cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
-		cc_set_ghash_desc(req, desc, seq_size);
-		/* process(ghash) assoc data */
-		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
-		cc_set_gctr_desc(req, desc, seq_size);
-		cc_proc_gcm_result(req, desc, seq_size);
-		return 0;
-	}
-
-	// for gcm and rfc4106.
-	cc_set_ghash_desc(req, desc, seq_size);
-	/* process(ghash) assoc data */
-	if (req->assoclen > 0)
-		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
-	cc_set_gctr_desc(req, desc, seq_size);
-	/* process(gctr+ghash) */
-	if (req_ctx->cryptlen)
-		cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
-	cc_proc_gcm_result(req, desc, seq_size);
-
-	return 0;
-}
-
-static int config_gcm_context(struct aead_request *req)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-
-	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
-				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
-				req->cryptlen :
-				(req->cryptlen - ctx->authsize);
-	__be32 counter = cpu_to_be32(2);
-
-	dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
-		__func__, cryptlen, req->assoclen, ctx->authsize);
-
-	memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
-
-	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
-
-	memcpy(req->iv + 12, &counter, 4);
-	memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
-
-	counter = cpu_to_be32(1);
-	memcpy(req->iv + 12, &counter, 4);
-	memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
-
-	if (!req_ctx->plaintext_authenticate_only) {
-		__be64 temp64;
-
-		temp64 = cpu_to_be64(req->assoclen * 8);
-		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
-		temp64 = cpu_to_be64(cryptlen * 8);
-		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
-	} else {
-		/* rfc4543=>  all data(AAD,IV,Plain) are considered additional
-		 * data that is nothing is encrypted.
-		 */
-		__be64 temp64;
-
-		temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
-				      cryptlen) * 8);
-		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
-		temp64 = 0;
-		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
-	}
-
-	return 0;
-}
-
-static void cc_proc_rfc4_gcm(struct aead_request *req)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-
-	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
-	       ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
-	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
-	       GCM_BLOCK_RFC4_IV_SIZE);
-	req->iv = areq_ctx->ctr_iv;
-	req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
-}
-
-static int cc_proc_aead(struct aead_request *req,
-			enum drv_crypto_direction direct)
-{
-	int rc = 0;
-	int seq_len = 0;
-	struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-	struct cc_crypto_req cc_req = {};
-
-	dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
-		((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
-		ctx, req, req->iv, sg_virt(req->src), req->src->offset,
-		sg_virt(req->dst), req->dst->offset, req->cryptlen);
-
-	/* STAT_PHASE_0: Init and sanity checks */
-
-	/* Check data length according to mode */
-	if (validate_data_size(ctx, direct, req)) {
-		dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
-			req->cryptlen, req->assoclen);
-		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
-		return -EINVAL;
-	}
-
-	/* Setup DX request structure */
-	cc_req.user_cb = (void *)cc_aead_complete;
-	cc_req.user_arg = (void *)req;
-
-	/* Setup request context */
-	areq_ctx->gen_ctx.op_type = direct;
-	areq_ctx->req_authsize = ctx->authsize;
-	areq_ctx->cipher_mode = ctx->cipher_mode;
-
-	/* STAT_PHASE_1: Map buffers */
-
-	if (ctx->cipher_mode == DRV_CIPHER_CTR) {
-		/* Build CTR IV - Copy nonce from last 4 bytes in
-		 * CTR key to first 4 bytes in CTR IV
-		 */
-		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
-		       CTR_RFC3686_NONCE_SIZE);
-		if (!areq_ctx->backup_giv) /*User none-generated IV*/
-			memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
-			       req->iv, CTR_RFC3686_IV_SIZE);
-		/* Initialize counter portion of counter block */
-		*(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
-			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
-
-		/* Replace with counter iv */
-		req->iv = areq_ctx->ctr_iv;
-		areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
-	} else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
-		   (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
-		areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
-		if (areq_ctx->ctr_iv != req->iv) {
-			memcpy(areq_ctx->ctr_iv, req->iv,
-			       crypto_aead_ivsize(tfm));
-			req->iv = areq_ctx->ctr_iv;
-		}
-	}  else {
-		areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
-	}
-
-	if (ctx->cipher_mode == DRV_CIPHER_CCM) {
-		rc = config_ccm_adata(req);
-		if (rc) {
-			dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
-				rc);
-			goto exit;
-		}
-	} else {
-		areq_ctx->ccm_hdr_size = ccm_header_size_null;
-	}
-
-	if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
-		rc = config_gcm_context(req);
-		if (rc) {
-			dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
-				rc);
-			goto exit;
-		}
-	}
-
-	rc = cc_map_aead_request(ctx->drvdata, req);
-	if (rc) {
-		dev_err(dev, "map_request() failed\n");
-		goto exit;
-	}
-
-	/* do we need to generate IV? */
-	if (areq_ctx->backup_giv) {
-		/* set the DMA mapped IV address*/
-		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
-			cc_req.ivgen_dma_addr[0] =
-				areq_ctx->gen_ctx.iv_dma_addr +
-				CTR_RFC3686_NONCE_SIZE;
-			cc_req.ivgen_dma_addr_len = 1;
-		} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
-			/* In ccm, the IV needs to exist both inside B0 and
-			 * inside the counter.It is also copied to iv_dma_addr
-			 * for other reasons (like returning it to the user).
-			 * So, using 3 (identical) IV outputs.
-			 */
-			cc_req.ivgen_dma_addr[0] =
-				areq_ctx->gen_ctx.iv_dma_addr +
-				CCM_BLOCK_IV_OFFSET;
-			cc_req.ivgen_dma_addr[1] =
-				sg_dma_address(&areq_ctx->ccm_adata_sg) +
-				CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
-			cc_req.ivgen_dma_addr[2] =
-				sg_dma_address(&areq_ctx->ccm_adata_sg) +
-				CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
-			cc_req.ivgen_dma_addr_len = 3;
-		} else {
-			cc_req.ivgen_dma_addr[0] =
-				areq_ctx->gen_ctx.iv_dma_addr;
-			cc_req.ivgen_dma_addr_len = 1;
-		}
-
-		/* set the IV size (8/16 B long)*/
-		cc_req.ivgen_size = crypto_aead_ivsize(tfm);
-	}
-
-	/* STAT_PHASE_2: Create sequence */
-
-	/* Load MLLI tables to SRAM if necessary */
-	cc_mlli_to_sram(req, desc, &seq_len);
-
-	/*TODO: move seq len by reference */
-	switch (ctx->auth_mode) {
-	case DRV_HASH_SHA1:
-	case DRV_HASH_SHA256:
-		cc_hmac_authenc(req, desc, &seq_len);
-		break;
-	case DRV_HASH_XCBC_MAC:
-		cc_xcbc_authenc(req, desc, &seq_len);
-		break;
-	case DRV_HASH_NULL:
-		if (ctx->cipher_mode == DRV_CIPHER_CCM)
-			cc_ccm(req, desc, &seq_len);
-		if (ctx->cipher_mode == DRV_CIPHER_GCTR)
-			cc_gcm(req, desc, &seq_len);
-		break;
-	default:
-		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
-		cc_unmap_aead_request(dev, req);
-		rc = -ENOTSUPP;
-		goto exit;
-	}
-
-	/* STAT_PHASE_3: Lock HW and push sequence */
-
-	rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
-
-	if (rc != -EINPROGRESS && rc != -EBUSY) {
-		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
-		cc_unmap_aead_request(dev, req);
-	}
-
-exit:
-	return rc;
-}
-
-static int cc_aead_encrypt(struct aead_request *req)
-{
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	int rc;
-
-	/* No generated IV required */
-	areq_ctx->backup_iv = req->iv;
-	areq_ctx->backup_giv = NULL;
-	areq_ctx->is_gcm4543 = false;
-
-	areq_ctx->plaintext_authenticate_only = false;
-
-	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
-	if (rc != -EINPROGRESS && rc != -EBUSY)
-		req->iv = areq_ctx->backup_iv;
-
-	return rc;
-}
-
-static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
-{
-	/* Very similar to cc_aead_encrypt() above. */
-
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-	int rc = -EINVAL;
-
-	if (!valid_assoclen(req)) {
-		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
-		goto out;
-	}
-
-	/* No generated IV required */
-	areq_ctx->backup_iv = req->iv;
-	areq_ctx->backup_giv = NULL;
-	areq_ctx->is_gcm4543 = true;
-
-	cc_proc_rfc4309_ccm(req);
-
-	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
-	if (rc != -EINPROGRESS && rc != -EBUSY)
-		req->iv = areq_ctx->backup_iv;
-out:
-	return rc;
-}
-
-static int cc_aead_decrypt(struct aead_request *req)
-{
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	int rc;
-
-	/* No generated IV required */
-	areq_ctx->backup_iv = req->iv;
-	areq_ctx->backup_giv = NULL;
-	areq_ctx->is_gcm4543 = false;
-
-	areq_ctx->plaintext_authenticate_only = false;
-
-	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
-	if (rc != -EINPROGRESS && rc != -EBUSY)
-		req->iv = areq_ctx->backup_iv;
-
-	return rc;
-}
-
-static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	int rc = -EINVAL;
-
-	if (!valid_assoclen(req)) {
-		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
-		goto out;
-	}
-
-	/* No generated IV required */
-	areq_ctx->backup_iv = req->iv;
-	areq_ctx->backup_giv = NULL;
-
-	areq_ctx->is_gcm4543 = true;
-	cc_proc_rfc4309_ccm(req);
-
-	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
-	if (rc != -EINPROGRESS && rc != -EBUSY)
-		req->iv = areq_ctx->backup_iv;
-
-out:
-	return rc;
-}
-
-static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
-				 unsigned int keylen)
-{
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-
-	dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
-
-	if (keylen < 4)
-		return -EINVAL;
-
-	keylen -= 4;
-	memcpy(ctx->ctr_nonce, key + keylen, 4);
-
-	return cc_aead_setkey(tfm, key, keylen);
-}
-
-static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
-				 unsigned int keylen)
-{
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-
-	dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
-
-	if (keylen < 4)
-		return -EINVAL;
-
-	keylen -= 4;
-	memcpy(ctx->ctr_nonce, key + keylen, 4);
-
-	return cc_aead_setkey(tfm, key, keylen);
-}
-
-static int cc_gcm_setauthsize(struct crypto_aead *authenc,
-			      unsigned int authsize)
-{
-	switch (authsize) {
-	case 4:
-	case 8:
-	case 12:
-	case 13:
-	case 14:
-	case 15:
-	case 16:
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return cc_aead_setauthsize(authenc, authsize);
-}
-
-static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
-				      unsigned int authsize)
-{
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-
-	dev_dbg(dev, "authsize %d\n", authsize);
-
-	switch (authsize) {
-	case 8:
-	case 12:
-	case 16:
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return cc_aead_setauthsize(authenc, authsize);
-}
-
-static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
-				      unsigned int authsize)
-{
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-
-	dev_dbg(dev, "authsize %d\n", authsize);
-
-	if (authsize != 16)
-		return -EINVAL;
-
-	return cc_aead_setauthsize(authenc, authsize);
-}
-
-static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
-{
-	/* Very similar to cc_aead_encrypt() above. */
-
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	int rc = -EINVAL;
-
-	if (!valid_assoclen(req)) {
-		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
-		goto out;
-	}
-
-	/* No generated IV required */
-	areq_ctx->backup_iv = req->iv;
-	areq_ctx->backup_giv = NULL;
-
-	areq_ctx->plaintext_authenticate_only = false;
-
-	cc_proc_rfc4_gcm(req);
-	areq_ctx->is_gcm4543 = true;
-
-	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
-	if (rc != -EINPROGRESS && rc != -EBUSY)
-		req->iv = areq_ctx->backup_iv;
-out:
-	return rc;
-}
-
-static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
-{
-	/* Very similar to cc_aead_encrypt() above. */
-
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	int rc;
-
-	//plaintext is not encryped with rfc4543
-	areq_ctx->plaintext_authenticate_only = true;
-
-	/* No generated IV required */
-	areq_ctx->backup_iv = req->iv;
-	areq_ctx->backup_giv = NULL;
-
-	cc_proc_rfc4_gcm(req);
-	areq_ctx->is_gcm4543 = true;
-
-	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
-	if (rc != -EINPROGRESS && rc != -EBUSY)
-		req->iv = areq_ctx->backup_iv;
-
-	return rc;
-}
-
-static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
-{
-	/* Very similar to cc_aead_decrypt() above. */
-
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	int rc = -EINVAL;
-
-	if (!valid_assoclen(req)) {
-		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
-		goto out;
-	}
-
-	/* No generated IV required */
-	areq_ctx->backup_iv = req->iv;
-	areq_ctx->backup_giv = NULL;
-
-	areq_ctx->plaintext_authenticate_only = false;
-
-	cc_proc_rfc4_gcm(req);
-	areq_ctx->is_gcm4543 = true;
-
-	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
-	if (rc != -EINPROGRESS && rc != -EBUSY)
-		req->iv = areq_ctx->backup_iv;
-out:
-	return rc;
-}
-
-static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
-{
-	/* Very similar to cc_aead_decrypt() above. */
-
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	int rc;
-
-	//plaintext is not decryped with rfc4543
-	areq_ctx->plaintext_authenticate_only = true;
-
-	/* No generated IV required */
-	areq_ctx->backup_iv = req->iv;
-	areq_ctx->backup_giv = NULL;
-
-	cc_proc_rfc4_gcm(req);
-	areq_ctx->is_gcm4543 = true;
-
-	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
-	if (rc != -EINPROGRESS && rc != -EBUSY)
-		req->iv = areq_ctx->backup_iv;
-
-	return rc;
-}
-
-/* DX Block aead alg */
-static struct cc_alg_template aead_algs[] = {
-	{
-		.name = "authenc(hmac(sha1),cbc(aes))",
-		.driver_name = "authenc-hmac-sha1-cbc-aes-dx",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = cc_aead_setkey,
-			.setauthsize = cc_aead_setauthsize,
-			.encrypt = cc_aead_encrypt,
-			.decrypt = cc_aead_decrypt,
-			.init = cc_aead_init,
-			.exit = cc_aead_exit,
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = SHA1_DIGEST_SIZE,
-		},
-		.cipher_mode = DRV_CIPHER_CBC,
-		.flow_mode = S_DIN_to_AES,
-		.auth_mode = DRV_HASH_SHA1,
-	},
-	{
-		.name = "authenc(hmac(sha1),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha1-cbc-des3-dx",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = cc_aead_setkey,
-			.setauthsize = cc_aead_setauthsize,
-			.encrypt = cc_aead_encrypt,
-			.decrypt = cc_aead_decrypt,
-			.init = cc_aead_init,
-			.exit = cc_aead_exit,
-			.ivsize = DES3_EDE_BLOCK_SIZE,
-			.maxauthsize = SHA1_DIGEST_SIZE,
-		},
-		.cipher_mode = DRV_CIPHER_CBC,
-		.flow_mode = S_DIN_to_DES,
-		.auth_mode = DRV_HASH_SHA1,
-	},
-	{
-		.name = "authenc(hmac(sha256),cbc(aes))",
-		.driver_name = "authenc-hmac-sha256-cbc-aes-dx",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = cc_aead_setkey,
-			.setauthsize = cc_aead_setauthsize,
-			.encrypt = cc_aead_encrypt,
-			.decrypt = cc_aead_decrypt,
-			.init = cc_aead_init,
-			.exit = cc_aead_exit,
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = SHA256_DIGEST_SIZE,
-		},
-		.cipher_mode = DRV_CIPHER_CBC,
-		.flow_mode = S_DIN_to_AES,
-		.auth_mode = DRV_HASH_SHA256,
-	},
-	{
-		.name = "authenc(hmac(sha256),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha256-cbc-des3-dx",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = cc_aead_setkey,
-			.setauthsize = cc_aead_setauthsize,
-			.encrypt = cc_aead_encrypt,
-			.decrypt = cc_aead_decrypt,
-			.init = cc_aead_init,
-			.exit = cc_aead_exit,
-			.ivsize = DES3_EDE_BLOCK_SIZE,
-			.maxauthsize = SHA256_DIGEST_SIZE,
-		},
-		.cipher_mode = DRV_CIPHER_CBC,
-		.flow_mode = S_DIN_to_DES,
-		.auth_mode = DRV_HASH_SHA256,
-	},
-	{
-		.name = "authenc(xcbc(aes),cbc(aes))",
-		.driver_name = "authenc-xcbc-aes-cbc-aes-dx",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = cc_aead_setkey,
-			.setauthsize = cc_aead_setauthsize,
-			.encrypt = cc_aead_encrypt,
-			.decrypt = cc_aead_decrypt,
-			.init = cc_aead_init,
-			.exit = cc_aead_exit,
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = AES_BLOCK_SIZE,
-		},
-		.cipher_mode = DRV_CIPHER_CBC,
-		.flow_mode = S_DIN_to_AES,
-		.auth_mode = DRV_HASH_XCBC_MAC,
-	},
-	{
-		.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
-		.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-dx",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = cc_aead_setkey,
-			.setauthsize = cc_aead_setauthsize,
-			.encrypt = cc_aead_encrypt,
-			.decrypt = cc_aead_decrypt,
-			.init = cc_aead_init,
-			.exit = cc_aead_exit,
-			.ivsize = CTR_RFC3686_IV_SIZE,
-			.maxauthsize = SHA1_DIGEST_SIZE,
-		},
-		.cipher_mode = DRV_CIPHER_CTR,
-		.flow_mode = S_DIN_to_AES,
-		.auth_mode = DRV_HASH_SHA1,
-	},
-	{
-		.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
-		.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-dx",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = cc_aead_setkey,
-			.setauthsize = cc_aead_setauthsize,
-			.encrypt = cc_aead_encrypt,
-			.decrypt = cc_aead_decrypt,
-			.init = cc_aead_init,
-			.exit = cc_aead_exit,
-			.ivsize = CTR_RFC3686_IV_SIZE,
-			.maxauthsize = SHA256_DIGEST_SIZE,
-		},
-		.cipher_mode = DRV_CIPHER_CTR,
-		.flow_mode = S_DIN_to_AES,
-		.auth_mode = DRV_HASH_SHA256,
-	},
-	{
-		.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
-		.driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-dx",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = cc_aead_setkey,
-			.setauthsize = cc_aead_setauthsize,
-			.encrypt = cc_aead_encrypt,
-			.decrypt = cc_aead_decrypt,
-			.init = cc_aead_init,
-			.exit = cc_aead_exit,
-			.ivsize = CTR_RFC3686_IV_SIZE,
-			.maxauthsize = AES_BLOCK_SIZE,
-		},
-		.cipher_mode = DRV_CIPHER_CTR,
-		.flow_mode = S_DIN_to_AES,
-		.auth_mode = DRV_HASH_XCBC_MAC,
-	},
-	{
-		.name = "ccm(aes)",
-		.driver_name = "ccm-aes-dx",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = cc_aead_setkey,
-			.setauthsize = cc_ccm_setauthsize,
-			.encrypt = cc_aead_encrypt,
-			.decrypt = cc_aead_decrypt,
-			.init = cc_aead_init,
-			.exit = cc_aead_exit,
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = AES_BLOCK_SIZE,
-		},
-		.cipher_mode = DRV_CIPHER_CCM,
-		.flow_mode = S_DIN_to_AES,
-		.auth_mode = DRV_HASH_NULL,
-	},
-	{
-		.name = "rfc4309(ccm(aes))",
-		.driver_name = "rfc4309-ccm-aes-dx",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = cc_rfc4309_ccm_setkey,
-			.setauthsize = cc_rfc4309_ccm_setauthsize,
-			.encrypt = cc_rfc4309_ccm_encrypt,
-			.decrypt = cc_rfc4309_ccm_decrypt,
-			.init = cc_aead_init,
-			.exit = cc_aead_exit,
-			.ivsize = CCM_BLOCK_IV_SIZE,
-			.maxauthsize = AES_BLOCK_SIZE,
-		},
-		.cipher_mode = DRV_CIPHER_CCM,
-		.flow_mode = S_DIN_to_AES,
-		.auth_mode = DRV_HASH_NULL,
-	},
-	{
-		.name = "gcm(aes)",
-		.driver_name = "gcm-aes-dx",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = cc_aead_setkey,
-			.setauthsize = cc_gcm_setauthsize,
-			.encrypt = cc_aead_encrypt,
-			.decrypt = cc_aead_decrypt,
-			.init = cc_aead_init,
-			.exit = cc_aead_exit,
-			.ivsize = 12,
-			.maxauthsize = AES_BLOCK_SIZE,
-		},
-		.cipher_mode = DRV_CIPHER_GCTR,
-		.flow_mode = S_DIN_to_AES,
-		.auth_mode = DRV_HASH_NULL,
-	},
-	{
-		.name = "rfc4106(gcm(aes))",
-		.driver_name = "rfc4106-gcm-aes-dx",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = cc_rfc4106_gcm_setkey,
-			.setauthsize = cc_rfc4106_gcm_setauthsize,
-			.encrypt = cc_rfc4106_gcm_encrypt,
-			.decrypt = cc_rfc4106_gcm_decrypt,
-			.init = cc_aead_init,
-			.exit = cc_aead_exit,
-			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
-			.maxauthsize = AES_BLOCK_SIZE,
-		},
-		.cipher_mode = DRV_CIPHER_GCTR,
-		.flow_mode = S_DIN_to_AES,
-		.auth_mode = DRV_HASH_NULL,
-	},
-	{
-		.name = "rfc4543(gcm(aes))",
-		.driver_name = "rfc4543-gcm-aes-dx",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = cc_rfc4543_gcm_setkey,
-			.setauthsize = cc_rfc4543_gcm_setauthsize,
-			.encrypt = cc_rfc4543_gcm_encrypt,
-			.decrypt = cc_rfc4543_gcm_decrypt,
-			.init = cc_aead_init,
-			.exit = cc_aead_exit,
-			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
-			.maxauthsize = AES_BLOCK_SIZE,
-		},
-		.cipher_mode = DRV_CIPHER_GCTR,
-		.flow_mode = S_DIN_to_AES,
-		.auth_mode = DRV_HASH_NULL,
-	},
-};
-
-static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
-						struct device *dev)
-{
-	struct cc_crypto_alg *t_alg;
-	struct aead_alg *alg;
-
-	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
-	if (!t_alg)
-		return ERR_PTR(-ENOMEM);
-
-	alg = &tmpl->template_aead;
-
-	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
-	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
-		 tmpl->driver_name);
-	alg->base.cra_module = THIS_MODULE;
-	alg->base.cra_priority = CC_CRA_PRIO;
-
-	alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
-	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
-			 tmpl->type;
-	alg->init = cc_aead_init;
-	alg->exit = cc_aead_exit;
-
-	t_alg->aead_alg = *alg;
-
-	t_alg->cipher_mode = tmpl->cipher_mode;
-	t_alg->flow_mode = tmpl->flow_mode;
-	t_alg->auth_mode = tmpl->auth_mode;
-
-	return t_alg;
-}
-
-int cc_aead_free(struct cc_drvdata *drvdata)
-{
-	struct cc_crypto_alg *t_alg, *n;
-	struct cc_aead_handle *aead_handle =
-		(struct cc_aead_handle *)drvdata->aead_handle;
-
-	if (aead_handle) {
-		/* Remove registered algs */
-		list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
-					 entry) {
-			crypto_unregister_aead(&t_alg->aead_alg);
-			list_del(&t_alg->entry);
-			kfree(t_alg);
-		}
-		kfree(aead_handle);
-		drvdata->aead_handle = NULL;
-	}
-
-	return 0;
-}
-
-int cc_aead_alloc(struct cc_drvdata *drvdata)
-{
-	struct cc_aead_handle *aead_handle;
-	struct cc_crypto_alg *t_alg;
-	int rc = -ENOMEM;
-	int alg;
-	struct device *dev = drvdata_to_dev(drvdata);
-
-	aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
-	if (!aead_handle) {
-		rc = -ENOMEM;
-		goto fail0;
-	}
-
-	INIT_LIST_HEAD(&aead_handle->aead_list);
-	drvdata->aead_handle = aead_handle;
-
-	aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
-							 MAX_HMAC_DIGEST_SIZE);
-
-	if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
-		dev_err(dev, "SRAM pool exhausted\n");
-		rc = -ENOMEM;
-		goto fail1;
-	}
-
-	/* Linux crypto */
-	for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
-		t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
-		if (IS_ERR(t_alg)) {
-			rc = PTR_ERR(t_alg);
-			dev_err(dev, "%s alg allocation failed\n",
-				aead_algs[alg].driver_name);
-			goto fail1;
-		}
-		t_alg->drvdata = drvdata;
-		rc = crypto_register_aead(&t_alg->aead_alg);
-		if (rc) {
-			dev_err(dev, "%s alg registration failed\n",
-				t_alg->aead_alg.base.cra_driver_name);
-			goto fail2;
-		} else {
-			list_add_tail(&t_alg->entry, &aead_handle->aead_list);
-			dev_dbg(dev, "Registered %s\n",
-				t_alg->aead_alg.base.cra_driver_name);
-		}
-	}
-
-	return 0;
-
-fail2:
-	kfree(t_alg);
-fail1:
-	cc_aead_free(drvdata);
-fail0:
-	return rc;
-}

+ 0 - 109
drivers/staging/ccree/cc_aead.h

@@ -1,109 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
-
-/* \file cc_aead.h
- * ARM CryptoCell AEAD Crypto API
- */
-
-#ifndef __CC_AEAD_H__
-#define __CC_AEAD_H__
-
-#include <linux/kernel.h>
-#include <crypto/algapi.h>
-#include <crypto/ctr.h>
-
-/* mac_cmp - HW writes 8 B but all bytes hold the same value */
-#define ICV_CMP_SIZE 8
-#define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE * 3)
-#define MAX_MAC_SIZE SHA256_DIGEST_SIZE
-
-/* defines for AES GCM configuration buffer */
-#define GCM_BLOCK_LEN_SIZE 8
-
-#define GCM_BLOCK_RFC4_IV_OFFSET	4
-#define GCM_BLOCK_RFC4_IV_SIZE		8  /* IV size for rfc's */
-#define GCM_BLOCK_RFC4_NONCE_OFFSET	0
-#define GCM_BLOCK_RFC4_NONCE_SIZE	4
-
-/* Offsets into AES CCM configuration buffer */
-#define CCM_B0_OFFSET 0
-#define CCM_A0_OFFSET 16
-#define CCM_CTR_COUNT_0_OFFSET 32
-/* CCM B0 and CTR_COUNT constants. */
-#define CCM_BLOCK_NONCE_OFFSET 1  /* Nonce offset inside B0 and CTR_COUNT */
-#define CCM_BLOCK_NONCE_SIZE   3  /* Nonce size inside B0 and CTR_COUNT */
-#define CCM_BLOCK_IV_OFFSET    4  /* IV offset inside B0 and CTR_COUNT */
-#define CCM_BLOCK_IV_SIZE      8  /* IV size inside B0 and CTR_COUNT */
-
-enum aead_ccm_header_size {
-	ccm_header_size_null = -1,
-	ccm_header_size_zero = 0,
-	ccm_header_size_2 = 2,
-	ccm_header_size_6 = 6,
-	ccm_header_size_max = S32_MAX
-};
-
-struct aead_req_ctx {
-	/* Allocate cache line although only 4 bytes are needed to
-	 *  assure next field falls @ cache line
-	 *  Used for both: digest HW compare and CCM/GCM MAC value
-	 */
-	u8 mac_buf[MAX_MAC_SIZE] ____cacheline_aligned;
-	u8 ctr_iv[AES_BLOCK_SIZE] ____cacheline_aligned;
-
-	//used in gcm
-	u8 gcm_iv_inc1[AES_BLOCK_SIZE] ____cacheline_aligned;
-	u8 gcm_iv_inc2[AES_BLOCK_SIZE] ____cacheline_aligned;
-	u8 hkey[AES_BLOCK_SIZE] ____cacheline_aligned;
-	struct {
-		u8 len_a[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned;
-		u8 len_c[GCM_BLOCK_LEN_SIZE];
-	} gcm_len_block;
-
-	u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
-	/* HW actual size input */
-	unsigned int hw_iv_size ____cacheline_aligned;
-	/* used to prevent cache coherence problem */
-	u8 backup_mac[MAX_MAC_SIZE];
-	u8 *backup_iv; /*store iv for generated IV flow*/
-	u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
-	dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
-	/* buffer for internal ccm configurations */
-	dma_addr_t ccm_iv0_dma_addr;
-	dma_addr_t icv_dma_addr; /* Phys. address of ICV */
-
-	//used in gcm
-	/* buffer for internal gcm configurations */
-	dma_addr_t gcm_iv_inc1_dma_addr;
-	/* buffer for internal gcm configurations */
-	dma_addr_t gcm_iv_inc2_dma_addr;
-	dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
-	dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
-	bool is_gcm4543;
-
-	u8 *icv_virt_addr; /* Virt. address of ICV */
-	struct async_gen_req_ctx gen_ctx;
-	struct cc_mlli assoc;
-	struct cc_mlli src;
-	struct cc_mlli dst;
-	struct scatterlist *src_sgl;
-	struct scatterlist *dst_sgl;
-	unsigned int src_offset;
-	unsigned int dst_offset;
-	enum cc_req_dma_buf_type assoc_buff_type;
-	enum cc_req_dma_buf_type data_buff_type;
-	struct mlli_params mlli_params;
-	unsigned int cryptlen;
-	struct scatterlist ccm_adata_sg;
-	enum aead_ccm_header_size ccm_hdr_size;
-	unsigned int req_authsize;
-	enum drv_cipher_mode cipher_mode;
-	bool is_icv_fragmented;
-	bool is_single_pass;
-	bool plaintext_authenticate_only; //for gcm_rfc4543
-};
-
-int cc_aead_alloc(struct cc_drvdata *drvdata);
-int cc_aead_free(struct cc_drvdata *drvdata);
-
-#endif /*__CC_AEAD_H__*/

+ 0 - 1651
drivers/staging/ccree/cc_buffer_mgr.c

@@ -1,1651 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
-
-#include <crypto/internal/aead.h>
-#include <crypto/authenc.h>
-#include <crypto/scatterwalk.h>
-#include <linux/dmapool.h>
-#include <linux/dma-mapping.h>
-
-#include "cc_buffer_mgr.h"
-#include "cc_lli_defs.h"
-#include "cc_cipher.h"
-#include "cc_hash.h"
-#include "cc_aead.h"
-
-enum dma_buffer_type {
-	DMA_NULL_TYPE = -1,
-	DMA_SGL_TYPE = 1,
-	DMA_BUFF_TYPE = 2,
-};
-
-struct buff_mgr_handle {
-	struct dma_pool *mlli_buffs_pool;
-};
-
-union buffer_array_entry {
-	struct scatterlist *sgl;
-	dma_addr_t buffer_dma;
-};
-
-struct buffer_array {
-	unsigned int num_of_buffers;
-	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
-	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
-	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
-	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
-	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
-	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
-	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
-};
-
-static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
-{
-	switch (type) {
-	case CC_DMA_BUF_NULL:
-		return "BUF_NULL";
-	case CC_DMA_BUF_DLLI:
-		return "BUF_DLLI";
-	case CC_DMA_BUF_MLLI:
-		return "BUF_MLLI";
-	default:
-		return "BUF_INVALID";
-	}
-}
-
-/**
- * cc_copy_mac() - Copy MAC to temporary location
- *
- * @dev: device object
- * @req: aead request object
- * @dir: [IN] copy from/to sgl
- */
-static void cc_copy_mac(struct device *dev, struct aead_request *req,
-			enum cc_sg_cpy_direct dir)
-{
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	u32 skip = req->assoclen + req->cryptlen;
-
-	if (areq_ctx->is_gcm4543)
-		skip += crypto_aead_ivsize(tfm);
-
-	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
-			   (skip - areq_ctx->req_authsize), skip, dir);
-}
-
-/**
- * cc_get_sgl_nents() - Get scatterlist number of entries.
- *
- * @sg_list: SG list
- * @nbytes: [IN] Total SGL data bytes.
- * @lbytes: [OUT] Returns the amount of bytes at the last entry
- */
-static unsigned int cc_get_sgl_nents(struct device *dev,
-				     struct scatterlist *sg_list,
-				     unsigned int nbytes, u32 *lbytes,
-				     bool *is_chained)
-{
-	unsigned int nents = 0;
-
-	while (nbytes && sg_list) {
-		if (sg_list->length) {
-			nents++;
-			/* get the number of bytes in the last entry */
-			*lbytes = nbytes;
-			nbytes -= (sg_list->length > nbytes) ?
-					nbytes : sg_list->length;
-			sg_list = sg_next(sg_list);
-		} else {
-			sg_list = (struct scatterlist *)sg_page(sg_list);
-			if (is_chained)
-				*is_chained = true;
-		}
-	}
-	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
-	return nents;
-}
-
-/**
- * cc_zero_sgl() - Zero scatter scatter list data.
- *
- * @sgl:
- */
-void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
-{
-	struct scatterlist *current_sg = sgl;
-	int sg_index = 0;
-
-	while (sg_index <= data_len) {
-		if (!current_sg) {
-			/* reached the end of the sgl --> just return back */
-			return;
-		}
-		memset(sg_virt(current_sg), 0, current_sg->length);
-		sg_index += current_sg->length;
-		current_sg = sg_next(current_sg);
-	}
-}
-
-/**
- * cc_copy_sg_portion() - Copy scatter list data,
- * from to_skip to end, to dest and vice versa
- *
- * @dest:
- * @sg:
- * @to_skip:
- * @end:
- * @direct:
- */
-void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
-			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
-{
-	u32 nents, lbytes;
-
-	nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
-	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
-		       (direct == CC_SG_TO_BUF));
-}
-
-static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
-				  u32 buff_size, u32 *curr_nents,
-				  u32 **mlli_entry_pp)
-{
-	u32 *mlli_entry_p = *mlli_entry_pp;
-	u32 new_nents;
-
-	/* Verify there is no memory overflow*/
-	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
-	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
-		return -ENOMEM;
-
-	/*handle buffer longer than 64 kbytes */
-	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
-		cc_lli_set_addr(mlli_entry_p, buff_dma);
-		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
-		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
-			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
-			mlli_entry_p[LLI_WORD1_OFFSET]);
-		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
-		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
-		mlli_entry_p = mlli_entry_p + 2;
-		(*curr_nents)++;
-	}
-	/*Last entry */
-	cc_lli_set_addr(mlli_entry_p, buff_dma);
-	cc_lli_set_size(mlli_entry_p, buff_size);
-	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
-		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
-		mlli_entry_p[LLI_WORD1_OFFSET]);
-	mlli_entry_p = mlli_entry_p + 2;
-	*mlli_entry_pp = mlli_entry_p;
-	(*curr_nents)++;
-	return 0;
-}
-
-static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
-				u32 sgl_data_len, u32 sgl_offset,
-				u32 *curr_nents, u32 **mlli_entry_pp)
-{
-	struct scatterlist *curr_sgl = sgl;
-	u32 *mlli_entry_p = *mlli_entry_pp;
-	s32 rc = 0;
-
-	for ( ; (curr_sgl && sgl_data_len);
-	      curr_sgl = sg_next(curr_sgl)) {
-		u32 entry_data_len =
-			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
-				sg_dma_len(curr_sgl) - sgl_offset :
-				sgl_data_len;
-		sgl_data_len -= entry_data_len;
-		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
-					    sgl_offset, entry_data_len,
-					    curr_nents, &mlli_entry_p);
-		if (rc)
-			return rc;
-
-		sgl_offset = 0;
-	}
-	*mlli_entry_pp = mlli_entry_p;
-	return 0;
-}
-
-static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
-			    struct mlli_params *mlli_params, gfp_t flags)
-{
-	u32 *mlli_p;
-	u32 total_nents = 0, prev_total_nents = 0;
-	int rc = 0, i;
-
-	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
-
-	/* Allocate memory from the pointed pool */
-	mlli_params->mlli_virt_addr =
-		dma_pool_alloc(mlli_params->curr_pool, flags,
-			       &mlli_params->mlli_dma_addr);
-	if (!mlli_params->mlli_virt_addr) {
-		dev_err(dev, "dma_pool_alloc() failed\n");
-		rc = -ENOMEM;
-		goto build_mlli_exit;
-	}
-	/* Point to start of MLLI */
-	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
-	/* go over all SG's and link it to one MLLI table */
-	for (i = 0; i < sg_data->num_of_buffers; i++) {
-		union buffer_array_entry *entry = &sg_data->entry[i];
-		u32 tot_len = sg_data->total_data_len[i];
-		u32 offset = sg_data->offset[i];
-
-		if (sg_data->type[i] == DMA_SGL_TYPE)
-			rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
-						  offset, &total_nents,
-						  &mlli_p);
-		else /*DMA_BUFF_TYPE*/
-			rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
-						    tot_len, &total_nents,
-						    &mlli_p);
-		if (rc)
-			return rc;
-
-		/* set last bit in the current table */
-		if (sg_data->mlli_nents[i]) {
-			/*Calculate the current MLLI table length for the
-			 *length field in the descriptor
-			 */
-			*sg_data->mlli_nents[i] +=
-				(total_nents - prev_total_nents);
-			prev_total_nents = total_nents;
-		}
-	}
-
-	/* Set MLLI size for the bypass operation */
-	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
-
-	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
-		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
-		mlli_params->mlli_len);
-
-build_mlli_exit:
-	return rc;
-}
-
-static void cc_add_buffer_entry(struct device *dev,
-				struct buffer_array *sgl_data,
-				dma_addr_t buffer_dma, unsigned int buffer_len,
-				bool is_last_entry, u32 *mlli_nents)
-{
-	unsigned int index = sgl_data->num_of_buffers;
-
-	dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
-		index, &buffer_dma, buffer_len, is_last_entry);
-	sgl_data->nents[index] = 1;
-	sgl_data->entry[index].buffer_dma = buffer_dma;
-	sgl_data->offset[index] = 0;
-	sgl_data->total_data_len[index] = buffer_len;
-	sgl_data->type[index] = DMA_BUFF_TYPE;
-	sgl_data->is_last[index] = is_last_entry;
-	sgl_data->mlli_nents[index] = mlli_nents;
-	if (sgl_data->mlli_nents[index])
-		*sgl_data->mlli_nents[index] = 0;
-	sgl_data->num_of_buffers++;
-}
-
-static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
-			    unsigned int nents, struct scatterlist *sgl,
-			    unsigned int data_len, unsigned int data_offset,
-			    bool is_last_table, u32 *mlli_nents)
-{
-	unsigned int index = sgl_data->num_of_buffers;
-
-	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
-		index, nents, sgl, data_len, is_last_table);
-	sgl_data->nents[index] = nents;
-	sgl_data->entry[index].sgl = sgl;
-	sgl_data->offset[index] = data_offset;
-	sgl_data->total_data_len[index] = data_len;
-	sgl_data->type[index] = DMA_SGL_TYPE;
-	sgl_data->is_last[index] = is_last_table;
-	sgl_data->mlli_nents[index] = mlli_nents;
-	if (sgl_data->mlli_nents[index])
-		*sgl_data->mlli_nents[index] = 0;
-	sgl_data->num_of_buffers++;
-}
-
-static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
-			 enum dma_data_direction direction)
-{
-	u32 i, j;
-	struct scatterlist *l_sg = sg;
-
-	for (i = 0; i < nents; i++) {
-		if (!l_sg)
-			break;
-		if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
-			dev_err(dev, "dma_map_page() sg buffer failed\n");
-			goto err;
-		}
-		l_sg = sg_next(l_sg);
-	}
-	return nents;
-
-err:
-	/* Restore mapped parts */
-	for (j = 0; j < i; j++) {
-		if (!sg)
-			break;
-		dma_unmap_sg(dev, sg, 1, direction);
-		sg = sg_next(sg);
-	}
-	return 0;
-}
-
-static int cc_map_sg(struct device *dev, struct scatterlist *sg,
-		     unsigned int nbytes, int direction, u32 *nents,
-		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
-{
-	bool is_chained = false;
-
-	if (sg_is_last(sg)) {
-		/* One entry only case -set to DLLI */
-		if (dma_map_sg(dev, sg, 1, direction) != 1) {
-			dev_err(dev, "dma_map_sg() single buffer failed\n");
-			return -ENOMEM;
-		}
-		dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
-			&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
-			sg->offset, sg->length);
-		*lbytes = nbytes;
-		*nents = 1;
-		*mapped_nents = 1;
-	} else {  /*sg_is_last*/
-		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
-					  &is_chained);
-		if (*nents > max_sg_nents) {
-			*nents = 0;
-			dev_err(dev, "Too many fragments. current %d max %d\n",
-				*nents, max_sg_nents);
-			return -ENOMEM;
-		}
-		if (!is_chained) {
-			/* In case of mmu the number of mapped nents might
-			 * be changed from the original sgl nents
-			 */
-			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-			if (*mapped_nents == 0) {
-				*nents = 0;
-				dev_err(dev, "dma_map_sg() sg buffer failed\n");
-				return -ENOMEM;
-			}
-		} else {
-			/*In this case the driver maps entry by entry so it
-			 * must have the same nents before and after map
-			 */
-			*mapped_nents = cc_dma_map_sg(dev, sg, *nents,
-						      direction);
-			if (*mapped_nents != *nents) {
-				*nents = *mapped_nents;
-				dev_err(dev, "dma_map_sg() sg buffer failed\n");
-				return -ENOMEM;
-			}
-		}
-	}
-
-	return 0;
-}
-
-static int
-cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
-		     u8 *config_data, struct buffer_array *sg_data,
-		     unsigned int assoclen)
-{
-	dev_dbg(dev, " handle additional data config set to DLLI\n");
-	/* create sg for the current buffer */
-	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
-		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
-	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
-		dev_err(dev, "dma_map_sg() config buffer failed\n");
-		return -ENOMEM;
-	}
-	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
-		&sg_dma_address(&areq_ctx->ccm_adata_sg),
-		sg_page(&areq_ctx->ccm_adata_sg),
-		sg_virt(&areq_ctx->ccm_adata_sg),
-		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
-	/* prepare for case of MLLI */
-	if (assoclen > 0) {
-		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
-				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
-				0, false, NULL);
-	}
-	return 0;
-}
-
-static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
-			   u8 *curr_buff, u32 curr_buff_cnt,
-			   struct buffer_array *sg_data)
-{
-	dev_dbg(dev, " handle curr buff %x set to   DLLI\n", curr_buff_cnt);
-	/* create sg for the current buffer */
-	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
-	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
-		dev_err(dev, "dma_map_sg() src buffer failed\n");
-		return -ENOMEM;
-	}
-	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
-		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
-		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
-		areq_ctx->buff_sg->length);
-	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
-	areq_ctx->curr_sg = areq_ctx->buff_sg;
-	areq_ctx->in_nents = 0;
-	/* prepare for case of MLLI */
-	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
-			false, NULL);
-	return 0;
-}
-
-void cc_unmap_blkcipher_request(struct device *dev, void *ctx,
-				unsigned int ivsize, struct scatterlist *src,
-				struct scatterlist *dst)
-{
-	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
-
-	if (req_ctx->gen_ctx.iv_dma_addr) {
-		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
-			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
-		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
-				 ivsize,
-				 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
-				 DMA_TO_DEVICE);
-	}
-	/* Release pool */
-	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
-	    req_ctx->mlli_params.mlli_virt_addr) {
-		dma_pool_free(req_ctx->mlli_params.curr_pool,
-			      req_ctx->mlli_params.mlli_virt_addr,
-			      req_ctx->mlli_params.mlli_dma_addr);
-	}
-
-	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
-	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
-
-	if (src != dst) {
-		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
-		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
-	}
-}
-
-int cc_map_blkcipher_request(struct cc_drvdata *drvdata, void *ctx,
-			     unsigned int ivsize, unsigned int nbytes,
-			     void *info, struct scatterlist *src,
-			     struct scatterlist *dst, gfp_t flags)
-{
-	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
-	struct mlli_params *mlli_params = &req_ctx->mlli_params;
-	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
-	struct device *dev = drvdata_to_dev(drvdata);
-	struct buffer_array sg_data;
-	u32 dummy = 0;
-	int rc = 0;
-	u32 mapped_nents = 0;
-
-	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
-	mlli_params->curr_pool = NULL;
-	sg_data.num_of_buffers = 0;
-
-	/* Map IV buffer */
-	if (ivsize) {
-		dump_byte_array("iv", (u8 *)info, ivsize);
-		req_ctx->gen_ctx.iv_dma_addr =
-			dma_map_single(dev, (void *)info,
-				       ivsize,
-				       req_ctx->is_giv ? DMA_BIDIRECTIONAL :
-				       DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
-			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
-				ivsize, info);
-			return -ENOMEM;
-		}
-		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
-			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
-	} else {
-		req_ctx->gen_ctx.iv_dma_addr = 0;
-	}
-
-	/* Map the src SGL */
-	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
-		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
-	if (rc) {
-		rc = -ENOMEM;
-		goto ablkcipher_exit;
-	}
-	if (mapped_nents > 1)
-		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
-
-	if (src == dst) {
-		/* Handle inplace operation */
-		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
-			req_ctx->out_nents = 0;
-			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
-					nbytes, 0, true,
-					&req_ctx->in_mlli_nents);
-		}
-	} else {
-		/* Map the dst sg */
-		if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
-			      &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
-			      &dummy, &mapped_nents)) {
-			rc = -ENOMEM;
-			goto ablkcipher_exit;
-		}
-		if (mapped_nents > 1)
-			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
-
-		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
-			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
-					nbytes, 0, true,
-					&req_ctx->in_mlli_nents);
-			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
-					nbytes, 0, true,
-					&req_ctx->out_mlli_nents);
-		}
-	}
-
-	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
-		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
-		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
-		if (rc)
-			goto ablkcipher_exit;
-	}
-
-	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
-		cc_dma_buf_type(req_ctx->dma_buf_type));
-
-	return 0;
-
-ablkcipher_exit:
-	cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
-	return rc;
-}
-
-void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
-{
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
-	u32 dummy;
-	bool chained;
-	u32 size_to_unmap = 0;
-
-	if (areq_ctx->mac_buf_dma_addr) {
-		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
-				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
-	}
-
-	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
-		if (areq_ctx->hkey_dma_addr) {
-			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
-					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
-		}
-
-		if (areq_ctx->gcm_block_len_dma_addr) {
-			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
-					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
-		}
-
-		if (areq_ctx->gcm_iv_inc1_dma_addr) {
-			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
-					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
-		}
-
-		if (areq_ctx->gcm_iv_inc2_dma_addr) {
-			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
-					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
-		}
-	}
-
-	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
-		if (areq_ctx->ccm_iv0_dma_addr) {
-			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
-					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
-		}
-
-		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
-	}
-	if (areq_ctx->gen_ctx.iv_dma_addr) {
-		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
-				 hw_iv_size, DMA_BIDIRECTIONAL);
-	}
-
-	/*In case a pool was set, a table was
-	 *allocated and should be released
-	 */
-	if (areq_ctx->mlli_params.curr_pool) {
-		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
-			&areq_ctx->mlli_params.mlli_dma_addr,
-			areq_ctx->mlli_params.mlli_virt_addr);
-		dma_pool_free(areq_ctx->mlli_params.curr_pool,
-			      areq_ctx->mlli_params.mlli_virt_addr,
-			      areq_ctx->mlli_params.mlli_dma_addr);
-	}
-
-	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
-		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
-		req->assoclen, req->cryptlen);
-	size_to_unmap = req->assoclen + req->cryptlen;
-	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
-		size_to_unmap += areq_ctx->req_authsize;
-	if (areq_ctx->is_gcm4543)
-		size_to_unmap += crypto_aead_ivsize(tfm);
-
-	dma_unmap_sg(dev, req->src,
-		     cc_get_sgl_nents(dev, req->src, size_to_unmap,
-				      &dummy, &chained),
-		     DMA_BIDIRECTIONAL);
-	if (req->src != req->dst) {
-		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
-			sg_virt(req->dst));
-		dma_unmap_sg(dev, req->dst,
-			     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
-					      &dummy, &chained),
-			     DMA_BIDIRECTIONAL);
-	}
-	if (drvdata->coherent &&
-	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
-	    req->src == req->dst) {
-		/* copy back mac from temporary location to deal with possible
-		 * data memory overriding that caused by cache coherence
-		 * problem.
-		 */
-		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
-	}
-}
-
-static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
-				 unsigned int sgl_nents, unsigned int authsize,
-				 u32 last_entry_data_size,
-				 bool *is_icv_fragmented)
-{
-	unsigned int icv_max_size = 0;
-	unsigned int icv_required_size = authsize > last_entry_data_size ?
-					(authsize - last_entry_data_size) :
-					authsize;
-	unsigned int nents;
-	unsigned int i;
-
-	if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
-		*is_icv_fragmented = false;
-		return 0;
-	}
-
-	for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
-		if (!sgl)
-			break;
-		sgl = sg_next(sgl);
-	}
-
-	if (sgl)
-		icv_max_size = sgl->length;
-
-	if (last_entry_data_size > authsize) {
-		/* ICV attached to data in last entry (not fragmented!) */
-		nents = 0;
-		*is_icv_fragmented = false;
-	} else if (last_entry_data_size == authsize) {
-		/* ICV placed in whole last entry (not fragmented!) */
-		nents = 1;
-		*is_icv_fragmented = false;
-	} else if (icv_max_size > icv_required_size) {
-		nents = 1;
-		*is_icv_fragmented = true;
-	} else if (icv_max_size == icv_required_size) {
-		nents = 2;
-		*is_icv_fragmented = true;
-	} else {
-		dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
-			MAX_ICV_NENTS_SUPPORTED);
-		nents = -1; /*unsupported*/
-	}
-	dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
-		(*is_icv_fragmented ? "true" : "false"), nents);
-
-	return nents;
-}
-
-static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
-			    struct aead_request *req,
-			    struct buffer_array *sg_data,
-			    bool is_last, bool do_chain)
-{
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
-	struct device *dev = drvdata_to_dev(drvdata);
-	int rc = 0;
-
-	if (!req->iv) {
-		areq_ctx->gen_ctx.iv_dma_addr = 0;
-		goto chain_iv_exit;
-	}
-
-	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
-						       hw_iv_size,
-						       DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
-		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
-			hw_iv_size, req->iv);
-		rc = -ENOMEM;
-		goto chain_iv_exit;
-	}
-
-	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
-		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
-	// TODO: what about CTR?? ask Ron
-	if (do_chain && areq_ctx->plaintext_authenticate_only) {
-		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
-		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
-		/* Chain to given list */
-		cc_add_buffer_entry(dev, sg_data,
-				    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
-				    iv_size_to_authenc, is_last,
-				    &areq_ctx->assoc.mlli_nents);
-		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
-	}
-
-chain_iv_exit:
-	return rc;
-}
-
-static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
-			       struct aead_request *req,
-			       struct buffer_array *sg_data,
-			       bool is_last, bool do_chain)
-{
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	int rc = 0;
-	u32 mapped_nents = 0;
-	struct scatterlist *current_sg = req->src;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	unsigned int sg_index = 0;
-	u32 size_of_assoc = req->assoclen;
-	struct device *dev = drvdata_to_dev(drvdata);
-
-	if (areq_ctx->is_gcm4543)
-		size_of_assoc += crypto_aead_ivsize(tfm);
-
-	if (!sg_data) {
-		rc = -EINVAL;
-		goto chain_assoc_exit;
-	}
-
-	if (req->assoclen == 0) {
-		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
-		areq_ctx->assoc.nents = 0;
-		areq_ctx->assoc.mlli_nents = 0;
-		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
-			cc_dma_buf_type(areq_ctx->assoc_buff_type),
-			areq_ctx->assoc.nents);
-		goto chain_assoc_exit;
-	}
-
-	//iterate over the sgl to see how many entries are for associated data
-	//it is assumed that if we reach here , the sgl is already mapped
-	sg_index = current_sg->length;
-	//the first entry in the scatter list contains all the associated data
-	if (sg_index > size_of_assoc) {
-		mapped_nents++;
-	} else {
-		while (sg_index <= size_of_assoc) {
-			current_sg = sg_next(current_sg);
-			/* if have reached the end of the sgl, then this is
-			 * unexpected
-			 */
-			if (!current_sg) {
-				dev_err(dev, "reached end of sg list. unexpected\n");
-				return -EINVAL;
-			}
-			sg_index += current_sg->length;
-			mapped_nents++;
-		}
-	}
-	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
-		dev_err(dev, "Too many fragments. current %d max %d\n",
-			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
-		return -ENOMEM;
-	}
-	areq_ctx->assoc.nents = mapped_nents;
-
-	/* in CCM case we have additional entry for
-	 * ccm header configurations
-	 */
-	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
-		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
-			dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
-				(areq_ctx->assoc.nents + 1),
-				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
-			rc = -ENOMEM;
-			goto chain_assoc_exit;
-		}
-	}
-
-	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
-		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
-	else
-		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
-
-	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
-		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
-			cc_dma_buf_type(areq_ctx->assoc_buff_type),
-			areq_ctx->assoc.nents);
-		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
-				req->assoclen, 0, is_last,
-				&areq_ctx->assoc.mlli_nents);
-		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
-	}
-
-chain_assoc_exit:
-	return rc;
-}
-
-static void cc_prepare_aead_data_dlli(struct aead_request *req,
-				      u32 *src_last_bytes, u32 *dst_last_bytes)
-{
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
-	unsigned int authsize = areq_ctx->req_authsize;
-
-	areq_ctx->is_icv_fragmented = false;
-	if (req->src == req->dst) {
-		/*INPLACE*/
-		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
-	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
-		/*NON-INPLACE and DECRYPT*/
-		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
-	} else {
-		/*NON-INPLACE and ENCRYPT*/
-		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
-			(*dst_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
-			(*dst_last_bytes - authsize);
-	}
-}
-
-static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
-				     struct aead_request *req,
-				     struct buffer_array *sg_data,
-				     u32 *src_last_bytes, u32 *dst_last_bytes,
-				     bool is_last_table)
-{
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
-	unsigned int authsize = areq_ctx->req_authsize;
-	int rc = 0, icv_nents;
-	struct device *dev = drvdata_to_dev(drvdata);
-	struct scatterlist *sg;
-
-	if (req->src == req->dst) {
-		/*INPLACE*/
-		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
-				areq_ctx->src_sgl, areq_ctx->cryptlen,
-				areq_ctx->src_offset, is_last_table,
-				&areq_ctx->src.mlli_nents);
-
-		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
-						  areq_ctx->src.nents,
-						  authsize, *src_last_bytes,
-						  &areq_ctx->is_icv_fragmented);
-		if (icv_nents < 0) {
-			rc = -ENOTSUPP;
-			goto prepare_data_mlli_exit;
-		}
-
-		if (areq_ctx->is_icv_fragmented) {
-			/* Backup happens only when ICV is fragmented, ICV
-			 * verification is made by CPU compare in order to
-			 * simplify MAC verification upon request completion
-			 */
-			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
-				/* In coherent platforms (e.g. ACP)
-				 * already copying ICV for any
-				 * INPLACE-DECRYPT operation, hence
-				 * we must neglect this code.
-				 */
-				if (!drvdata->coherent)
-					cc_copy_mac(dev, req, CC_SG_TO_BUF);
-
-				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
-			} else {
-				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
-				areq_ctx->icv_dma_addr =
-					areq_ctx->mac_buf_dma_addr;
-			}
-		} else { /* Contig. ICV */
-			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
-			/*Should hanlde if the sg is not contig.*/
-			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
-				(*src_last_bytes - authsize);
-			areq_ctx->icv_virt_addr = sg_virt(sg) +
-				(*src_last_bytes - authsize);
-		}
-
-	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
-		/*NON-INPLACE and DECRYPT*/
-		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
-				areq_ctx->src_sgl, areq_ctx->cryptlen,
-				areq_ctx->src_offset, is_last_table,
-				&areq_ctx->src.mlli_nents);
-		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
-				areq_ctx->dst_sgl, areq_ctx->cryptlen,
-				areq_ctx->dst_offset, is_last_table,
-				&areq_ctx->dst.mlli_nents);
-
-		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
-						  areq_ctx->src.nents,
-						  authsize, *src_last_bytes,
-						  &areq_ctx->is_icv_fragmented);
-		if (icv_nents < 0) {
-			rc = -ENOTSUPP;
-			goto prepare_data_mlli_exit;
-		}
-
-		/* Backup happens only when ICV is fragmented, ICV
-		 * verification is made by CPU compare in order to simplify
-		 * MAC verification upon request completion
-		 */
-		if (areq_ctx->is_icv_fragmented) {
-			cc_copy_mac(dev, req, CC_SG_TO_BUF);
-			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
-
-		} else { /* Contig. ICV */
-			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
-			/*Should hanlde if the sg is not contig.*/
-			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
-				(*src_last_bytes - authsize);
-			areq_ctx->icv_virt_addr = sg_virt(sg) +
-				(*src_last_bytes - authsize);
-		}
-
-	} else {
-		/*NON-INPLACE and ENCRYPT*/
-		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
-				areq_ctx->dst_sgl, areq_ctx->cryptlen,
-				areq_ctx->dst_offset, is_last_table,
-				&areq_ctx->dst.mlli_nents);
-		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
-				areq_ctx->src_sgl, areq_ctx->cryptlen,
-				areq_ctx->src_offset, is_last_table,
-				&areq_ctx->src.mlli_nents);
-
-		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
-						  areq_ctx->dst.nents,
-						  authsize, *dst_last_bytes,
-						  &areq_ctx->is_icv_fragmented);
-		if (icv_nents < 0) {
-			rc = -ENOTSUPP;
-			goto prepare_data_mlli_exit;
-		}
-
-		if (!areq_ctx->is_icv_fragmented) {
-			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
-			/* Contig. ICV */
-			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
-				(*dst_last_bytes - authsize);
-			areq_ctx->icv_virt_addr = sg_virt(sg) +
-				(*dst_last_bytes - authsize);
-		} else {
-			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
-			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
-		}
-	}
-
-prepare_data_mlli_exit:
-	return rc;
-}
-
-static int cc_aead_chain_data(struct cc_drvdata *drvdata,
-			      struct aead_request *req,
-			      struct buffer_array *sg_data,
-			      bool is_last_table, bool do_chain)
-{
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	struct device *dev = drvdata_to_dev(drvdata);
-	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
-	unsigned int authsize = areq_ctx->req_authsize;
-	int src_last_bytes = 0, dst_last_bytes = 0;
-	int rc = 0;
-	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
-	u32 offset = 0;
-	/* non-inplace mode */
-	unsigned int size_for_map = req->assoclen + req->cryptlen;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	u32 sg_index = 0;
-	bool chained = false;
-	bool is_gcm4543 = areq_ctx->is_gcm4543;
-	u32 size_to_skip = req->assoclen;
-
-	if (is_gcm4543)
-		size_to_skip += crypto_aead_ivsize(tfm);
-
-	offset = size_to_skip;
-
-	if (!sg_data)
-		return -EINVAL;
-
-	areq_ctx->src_sgl = req->src;
-	areq_ctx->dst_sgl = req->dst;
-
-	if (is_gcm4543)
-		size_for_map += crypto_aead_ivsize(tfm);
-
-	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
-			authsize : 0;
-	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
-					    &src_last_bytes, &chained);
-	sg_index = areq_ctx->src_sgl->length;
-	//check where the data starts
-	while (sg_index <= size_to_skip) {
-		offset -= areq_ctx->src_sgl->length;
-		areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
-		//if have reached the end of the sgl, then this is unexpected
-		if (!areq_ctx->src_sgl) {
-			dev_err(dev, "reached end of sg list. unexpected\n");
-			return -EINVAL;
-		}
-		sg_index += areq_ctx->src_sgl->length;
-		src_mapped_nents--;
-	}
-	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
-		dev_err(dev, "Too many fragments. current %d max %d\n",
-			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
-			return -ENOMEM;
-	}
-
-	areq_ctx->src.nents = src_mapped_nents;
-
-	areq_ctx->src_offset = offset;
-
-	if (req->src != req->dst) {
-		size_for_map = req->assoclen + req->cryptlen;
-		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
-				authsize : 0;
-		if (is_gcm4543)
-			size_for_map += crypto_aead_ivsize(tfm);
-
-		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
-			       &areq_ctx->dst.nents,
-			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
-			       &dst_mapped_nents);
-		if (rc) {
-			rc = -ENOMEM;
-			goto chain_data_exit;
-		}
-	}
-
-	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
-					    &dst_last_bytes, &chained);
-	sg_index = areq_ctx->dst_sgl->length;
-	offset = size_to_skip;
-
-	//check where the data starts
-	while (sg_index <= size_to_skip) {
-		offset -= areq_ctx->dst_sgl->length;
-		areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
-		//if have reached the end of the sgl, then this is unexpected
-		if (!areq_ctx->dst_sgl) {
-			dev_err(dev, "reached end of sg list. unexpected\n");
-			return -EINVAL;
-		}
-		sg_index += areq_ctx->dst_sgl->length;
-		dst_mapped_nents--;
-	}
-	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
-		dev_err(dev, "Too many fragments. current %d max %d\n",
-			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
-		return -ENOMEM;
-	}
-	areq_ctx->dst.nents = dst_mapped_nents;
-	areq_ctx->dst_offset = offset;
-	if (src_mapped_nents > 1 ||
-	    dst_mapped_nents  > 1 ||
-	    do_chain) {
-		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
-		rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
-					       &src_last_bytes,
-					       &dst_last_bytes, is_last_table);
-	} else {
-		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
-		cc_prepare_aead_data_dlli(req, &src_last_bytes,
-					  &dst_last_bytes);
-	}
-
-chain_data_exit:
-	return rc;
-}
-
-static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
-				      struct aead_request *req)
-{
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	u32 curr_mlli_size = 0;
-
-	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
-		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
-		curr_mlli_size = areq_ctx->assoc.mlli_nents *
-						LLI_ENTRY_BYTE_SIZE;
-	}
-
-	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
-		/*Inplace case dst nents equal to src nents*/
-		if (req->src == req->dst) {
-			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
-			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
-								curr_mlli_size;
-			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
-			if (!areq_ctx->is_single_pass)
-				areq_ctx->assoc.mlli_nents +=
-					areq_ctx->src.mlli_nents;
-		} else {
-			if (areq_ctx->gen_ctx.op_type ==
-					DRV_CRYPTO_DIRECTION_DECRYPT) {
-				areq_ctx->src.sram_addr =
-						drvdata->mlli_sram_addr +
-								curr_mlli_size;
-				areq_ctx->dst.sram_addr =
-						areq_ctx->src.sram_addr +
-						areq_ctx->src.mlli_nents *
-						LLI_ENTRY_BYTE_SIZE;
-				if (!areq_ctx->is_single_pass)
-					areq_ctx->assoc.mlli_nents +=
-						areq_ctx->src.mlli_nents;
-			} else {
-				areq_ctx->dst.sram_addr =
-						drvdata->mlli_sram_addr +
-								curr_mlli_size;
-				areq_ctx->src.sram_addr =
-						areq_ctx->dst.sram_addr +
-						areq_ctx->dst.mlli_nents *
-						LLI_ENTRY_BYTE_SIZE;
-				if (!areq_ctx->is_single_pass)
-					areq_ctx->assoc.mlli_nents +=
-						areq_ctx->dst.mlli_nents;
-			}
-		}
-	}
-}
-
-int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
-{
-	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
-	struct device *dev = drvdata_to_dev(drvdata);
-	struct buffer_array sg_data;
-	unsigned int authsize = areq_ctx->req_authsize;
-	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
-	int rc = 0;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	bool is_gcm4543 = areq_ctx->is_gcm4543;
-	dma_addr_t dma_addr;
-	u32 mapped_nents = 0;
-	u32 dummy = 0; /*used for the assoc data fragments */
-	u32 size_to_map = 0;
-	gfp_t flags = cc_gfp_flags(&req->base);
-
-	mlli_params->curr_pool = NULL;
-	sg_data.num_of_buffers = 0;
-
-	/* copy mac to a temporary location to deal with possible
-	 * data memory overriding that caused by cache coherence problem.
-	 */
-	if (drvdata->coherent &&
-	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
-	    req->src == req->dst)
-		cc_copy_mac(dev, req, CC_SG_TO_BUF);
-
-	/* cacluate the size for cipher remove ICV in decrypt*/
-	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
-				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
-				req->cryptlen :
-				(req->cryptlen - authsize);
-
-	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
-				  DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(dev, dma_addr)) {
-		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
-			MAX_MAC_SIZE, areq_ctx->mac_buf);
-		rc = -ENOMEM;
-		goto aead_map_failure;
-	}
-	areq_ctx->mac_buf_dma_addr = dma_addr;
-
-	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
-		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
-
-		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
-					  DMA_TO_DEVICE);
-
-		if (dma_mapping_error(dev, dma_addr)) {
-			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
-				AES_BLOCK_SIZE, addr);
-			areq_ctx->ccm_iv0_dma_addr = 0;
-			rc = -ENOMEM;
-			goto aead_map_failure;
-		}
-		areq_ctx->ccm_iv0_dma_addr = dma_addr;
-
-		if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
-					 &sg_data, req->assoclen)) {
-			rc = -ENOMEM;
-			goto aead_map_failure;
-		}
-	}
-
-	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
-		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
-					  DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(dev, dma_addr)) {
-			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
-				AES_BLOCK_SIZE, areq_ctx->hkey);
-			rc = -ENOMEM;
-			goto aead_map_failure;
-		}
-		areq_ctx->hkey_dma_addr = dma_addr;
-
-		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
-					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_addr)) {
-			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
-				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
-			rc = -ENOMEM;
-			goto aead_map_failure;
-		}
-		areq_ctx->gcm_block_len_dma_addr = dma_addr;
-
-		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
-					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
-
-		if (dma_mapping_error(dev, dma_addr)) {
-			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
-				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
-			areq_ctx->gcm_iv_inc1_dma_addr = 0;
-			rc = -ENOMEM;
-			goto aead_map_failure;
-		}
-		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
-
-		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
-					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
-
-		if (dma_mapping_error(dev, dma_addr)) {
-			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
-				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
-			areq_ctx->gcm_iv_inc2_dma_addr = 0;
-			rc = -ENOMEM;
-			goto aead_map_failure;
-		}
-		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
-	}
-
-	size_to_map = req->cryptlen + req->assoclen;
-	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
-		size_to_map += authsize;
-
-	if (is_gcm4543)
-		size_to_map += crypto_aead_ivsize(tfm);
-	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
-		       &areq_ctx->src.nents,
-		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
-			LLI_MAX_NUM_OF_DATA_ENTRIES),
-		       &dummy, &mapped_nents);
-	if (rc) {
-		rc = -ENOMEM;
-		goto aead_map_failure;
-	}
-
-	if (areq_ctx->is_single_pass) {
-		/*
-		 * Create MLLI table for:
-		 *   (1) Assoc. data
-		 *   (2) Src/Dst SGLs
-		 *   Note: IV is contg. buffer (not an SGL)
-		 */
-		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
-		if (rc)
-			goto aead_map_failure;
-		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
-		if (rc)
-			goto aead_map_failure;
-		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
-		if (rc)
-			goto aead_map_failure;
-	} else { /* DOUBLE-PASS flow */
-		/*
-		 * Prepare MLLI table(s) in this order:
-		 *
-		 * If ENCRYPT/DECRYPT (inplace):
-		 *   (1) MLLI table for assoc
-		 *   (2) IV entry (chained right after end of assoc)
-		 *   (3) MLLI for src/dst (inplace operation)
-		 *
-		 * If ENCRYPT (non-inplace)
-		 *   (1) MLLI table for assoc
-		 *   (2) IV entry (chained right after end of assoc)
-		 *   (3) MLLI for dst
-		 *   (4) MLLI for src
-		 *
-		 * If DECRYPT (non-inplace)
-		 *   (1) MLLI table for assoc
-		 *   (2) IV entry (chained right after end of assoc)
-		 *   (3) MLLI for src
-		 *   (4) MLLI for dst
-		 */
-		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
-		if (rc)
-			goto aead_map_failure;
-		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
-		if (rc)
-			goto aead_map_failure;
-		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
-		if (rc)
-			goto aead_map_failure;
-	}
-
-	/* Mlli support -start building the MLLI according to the above
-	 * results
-	 */
-	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
-	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
-		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
-		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
-		if (rc)
-			goto aead_map_failure;
-
-		cc_update_aead_mlli_nents(drvdata, req);
-		dev_dbg(dev, "assoc params mn %d\n",
-			areq_ctx->assoc.mlli_nents);
-		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
-		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
-	}
-	return 0;
-
-aead_map_failure:
-	cc_unmap_aead_request(dev, req);
-	return rc;
-}
-
-int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
-			      struct scatterlist *src, unsigned int nbytes,
-			      bool do_update, gfp_t flags)
-{
-	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
-	struct device *dev = drvdata_to_dev(drvdata);
-	u8 *curr_buff = cc_hash_buf(areq_ctx);
-	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
-	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
-	struct buffer_array sg_data;
-	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
-	u32 dummy = 0;
-	u32 mapped_nents = 0;
-
-	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
-		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
-	/* Init the type of the dma buffer */
-	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
-	mlli_params->curr_pool = NULL;
-	sg_data.num_of_buffers = 0;
-	areq_ctx->in_nents = 0;
-
-	if (nbytes == 0 && *curr_buff_cnt == 0) {
-		/* nothing to do */
-		return 0;
-	}
-
-	/*TODO: copy data in case that buffer is enough for operation */
-	/* map the previous buffer */
-	if (*curr_buff_cnt) {
-		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
-				    &sg_data)) {
-			return -ENOMEM;
-		}
-	}
-
-	if (src && nbytes > 0 && do_update) {
-		if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
-			      &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
-			      &dummy, &mapped_nents)) {
-			goto unmap_curr_buff;
-		}
-		if (src && mapped_nents == 1 &&
-		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
-			memcpy(areq_ctx->buff_sg, src,
-			       sizeof(struct scatterlist));
-			areq_ctx->buff_sg->length = nbytes;
-			areq_ctx->curr_sg = areq_ctx->buff_sg;
-			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
-		} else {
-			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
-		}
-	}
-
-	/*build mlli */
-	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
-		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
-		/* add the src data to the sg_data */
-		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
-				0, true, &areq_ctx->mlli_nents);
-		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
-			goto fail_unmap_din;
-	}
-	/* change the buffer index for the unmap function */
-	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
-	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
-		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
-	return 0;
-
-fail_unmap_din:
-	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
-
-unmap_curr_buff:
-	if (*curr_buff_cnt)
-		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
-
-	return -ENOMEM;
-}
-
-int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
-			       struct scatterlist *src, unsigned int nbytes,
-			       unsigned int block_size, gfp_t flags)
-{
-	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
-	struct device *dev = drvdata_to_dev(drvdata);
-	u8 *curr_buff = cc_hash_buf(areq_ctx);
-	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
-	u8 *next_buff = cc_next_buf(areq_ctx);
-	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
-	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
-	unsigned int update_data_len;
-	u32 total_in_len = nbytes + *curr_buff_cnt;
-	struct buffer_array sg_data;
-	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
-	unsigned int swap_index = 0;
-	u32 dummy = 0;
-	u32 mapped_nents = 0;
-
-	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
-		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
-	/* Init the type of the dma buffer */
-	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
-	mlli_params->curr_pool = NULL;
-	areq_ctx->curr_sg = NULL;
-	sg_data.num_of_buffers = 0;
-	areq_ctx->in_nents = 0;
-
-	if (total_in_len < block_size) {
-		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
-			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
-		areq_ctx->in_nents =
-			cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
-		sg_copy_to_buffer(src, areq_ctx->in_nents,
-				  &curr_buff[*curr_buff_cnt], nbytes);
-		*curr_buff_cnt += nbytes;
-		return 1;
-	}
-
-	/* Calculate the residue size*/
-	*next_buff_cnt = total_in_len & (block_size - 1);
-	/* update data len */
-	update_data_len = total_in_len - *next_buff_cnt;
-
-	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
-		*next_buff_cnt, update_data_len);
-
-	/* Copy the new residue to next buffer */
-	if (*next_buff_cnt) {
-		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
-			next_buff, (update_data_len - *curr_buff_cnt),
-			*next_buff_cnt);
-		cc_copy_sg_portion(dev, next_buff, src,
-				   (update_data_len - *curr_buff_cnt),
-				   nbytes, CC_SG_TO_BUF);
-		/* change the buffer index for next operation */
-		swap_index = 1;
-	}
-
-	if (*curr_buff_cnt) {
-		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
-				    &sg_data)) {
-			return -ENOMEM;
-		}
-		/* change the buffer index for next operation */
-		swap_index = 1;
-	}
-
-	if (update_data_len > *curr_buff_cnt) {
-		if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
-			      DMA_TO_DEVICE, &areq_ctx->in_nents,
-			      LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
-			      &mapped_nents)) {
-			goto unmap_curr_buff;
-		}
-		if (mapped_nents == 1 &&
-		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
-			/* only one entry in the SG and no previous data */
-			memcpy(areq_ctx->buff_sg, src,
-			       sizeof(struct scatterlist));
-			areq_ctx->buff_sg->length = update_data_len;
-			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
-			areq_ctx->curr_sg = areq_ctx->buff_sg;
-		} else {
-			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
-		}
-	}
-
-	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
-		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
-		/* add the src data to the sg_data */
-		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
-				(update_data_len - *curr_buff_cnt), 0, true,
-				&areq_ctx->mlli_nents);
-		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
-			goto fail_unmap_din;
-	}
-	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
-
-	return 0;
-
-fail_unmap_din:
-	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
-
-unmap_curr_buff:
-	if (*curr_buff_cnt)
-		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
-
-	return -ENOMEM;
-}
-
-void cc_unmap_hash_request(struct device *dev, void *ctx,
-			   struct scatterlist *src, bool do_revert)
-{
-	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
-	u32 *prev_len = cc_next_buf_cnt(areq_ctx);
-
-	/*In case a pool was set, a table was
-	 *allocated and should be released
-	 */
-	if (areq_ctx->mlli_params.curr_pool) {
-		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
-			&areq_ctx->mlli_params.mlli_dma_addr,
-			areq_ctx->mlli_params.mlli_virt_addr);
-		dma_pool_free(areq_ctx->mlli_params.curr_pool,
-			      areq_ctx->mlli_params.mlli_virt_addr,
-			      areq_ctx->mlli_params.mlli_dma_addr);
-	}
-
-	if (src && areq_ctx->in_nents) {
-		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
-			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
-		dma_unmap_sg(dev, src,
-			     areq_ctx->in_nents, DMA_TO_DEVICE);
-	}
-
-	if (*prev_len) {
-		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
-			sg_virt(areq_ctx->buff_sg),
-			&sg_dma_address(areq_ctx->buff_sg),
-			sg_dma_len(areq_ctx->buff_sg));
-		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
-		if (!do_revert) {
-			/* clean the previous data length for update
-			 * operation
-			 */
-			*prev_len = 0;
-		} else {
-			areq_ctx->buff_index ^= 1;
-		}
-	}
-}
-
-int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
-{
-	struct buff_mgr_handle *buff_mgr_handle;
-	struct device *dev = drvdata_to_dev(drvdata);
-
-	buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
-	if (!buff_mgr_handle)
-		return -ENOMEM;
-
-	drvdata->buff_mgr_handle = buff_mgr_handle;
-
-	buff_mgr_handle->mlli_buffs_pool =
-		dma_pool_create("dx_single_mlli_tables", dev,
-				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
-				LLI_ENTRY_BYTE_SIZE,
-				MLLI_TABLE_MIN_ALIGNMENT, 0);
-
-	if (!buff_mgr_handle->mlli_buffs_pool)
-		goto error;
-
-	return 0;
-
-error:
-	cc_buffer_mgr_fini(drvdata);
-	return -ENOMEM;
-}
-
-int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
-{
-	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
-
-	if (buff_mgr_handle) {
-		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
-		kfree(drvdata->buff_mgr_handle);
-		drvdata->buff_mgr_handle = NULL;
-	}
-	return 0;
-}

+ 0 - 74
drivers/staging/ccree/cc_buffer_mgr.h

@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
-
-/* \file cc_buffer_mgr.h
- * Buffer Manager
- */
-
-#ifndef __CC_BUFFER_MGR_H__
-#define __CC_BUFFER_MGR_H__
-
-#include <crypto/algapi.h>
-
-#include "cc_driver.h"
-
-enum cc_req_dma_buf_type {
-	CC_DMA_BUF_NULL = 0,
-	CC_DMA_BUF_DLLI,
-	CC_DMA_BUF_MLLI
-};
-
-enum cc_sg_cpy_direct {
-	CC_SG_TO_BUF = 0,
-	CC_SG_FROM_BUF = 1
-};
-
-struct cc_mlli {
-	cc_sram_addr_t sram_addr;
-	unsigned int nents; //sg nents
-	unsigned int mlli_nents; //mlli nents might be different than the above
-};
-
-struct mlli_params {
-	struct dma_pool *curr_pool;
-	u8 *mlli_virt_addr;
-	dma_addr_t mlli_dma_addr;
-	u32 mlli_len;
-};
-
-int cc_buffer_mgr_init(struct cc_drvdata *drvdata);
-
-int cc_buffer_mgr_fini(struct cc_drvdata *drvdata);
-
-int cc_map_blkcipher_request(struct cc_drvdata *drvdata, void *ctx,
-			     unsigned int ivsize, unsigned int nbytes,
-			     void *info, struct scatterlist *src,
-			     struct scatterlist *dst, gfp_t flags);
-
-void cc_unmap_blkcipher_request(struct device *dev, void *ctx,
-				unsigned int ivsize,
-				struct scatterlist *src,
-				struct scatterlist *dst);
-
-int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req);
-
-void cc_unmap_aead_request(struct device *dev, struct aead_request *req);
-
-int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
-			      struct scatterlist *src, unsigned int nbytes,
-			      bool do_update, gfp_t flags);
-
-int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
-			       struct scatterlist *src, unsigned int nbytes,
-			       unsigned int block_size, gfp_t flags);
-
-void cc_unmap_hash_request(struct device *dev, void *ctx,
-			   struct scatterlist *src, bool do_revert);
-
-void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
-			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct);
-
-void cc_zero_sgl(struct scatterlist *sgl, u32 data_len);
-
-#endif /*__BUFFER_MGR_H__*/
-

Một số tệp đã không được hiển thị bởi vì quá nhiều tập tin thay đổi trong này khác