
Merge tag 'char-misc-4.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver patches from Greg KH:
 "Here's the "big" char/misc driver update for 4.3-rc1.

  Not much really interesting here, just a number of little changes all
  over the place, and some nice consolidation of the nvmem drivers to a
  common framework.  As usual, the mei drivers stand out as the largest
  "churn" to handle new devices and features in their hardware.

  All have been in linux-next for a while with no issues"

* tag 'char-misc-4.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (136 commits)
  auxdisplay: ks0108: initialize local parport variable
  extcon: palmas: Fix build break due to devm_gpiod_get_optional API change
  extcon: palmas: Support GPIO based USB ID detection
  extcon: Fix signedness bugs about break error handling
  extcon: Drop owner assignment from i2c_driver
  extcon: arizona: Simplify pdata symantics for micd_dbtime
  extcon: arizona: Declare 3-pole jack if we detect open circuit on mic
  extcon: Add exception handling to prevent the NULL pointer access
  extcon: arizona: Ensure variables are set for headphone detection
  extcon: arizona: Use gpiod inteface to handle micd_pol_gpio gpio
  extcon: arizona: Add basic microphone detection DT/ACPI bindings
  extcon: arizona: Update to use the new device properties API
  extcon: palmas: Remove the mutually_exclusive array
  extcon: Remove optional print_state() function pointer of struct extcon_dev
  extcon: Remove duplicate header file in extcon.h
  extcon: max77843: Clear IRQ bits state before request IRQ
  toshiba laptop: replace ioremap_cache with ioremap
  misc: eeprom: max6875: clean up max6875_read()
  misc: eeprom: clean up eeprom_read()
  misc: eeprom: 93xx46: clean up eeprom_93xx46_bin_read/write
  ...
Linus Torvalds 10 years ago
parent
commit
1c00038c76
100 files changed, 5369 insertions, 1684 deletions
  1. Documentation/ABI/stable/sysfs-bus-vmbus (+29, -0)
  2. Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x (+1, -1)
  3. Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x (+1, -1)
  4. Documentation/ABI/testing/sysfs-driver-sunxi-sid (+0, -22)
  5. Documentation/devicetree/bindings/arm/coresight.txt (+1, -0)
  6. Documentation/devicetree/bindings/extcon/extcon-palmas.txt (+4, -1)
  7. Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt (+4, -0)
  8. Documentation/devicetree/bindings/nvmem/nvmem.txt (+80, -0)
  9. Documentation/devicetree/bindings/nvmem/qfprom.txt (+35, -0)
  10. Documentation/devicetree/bindings/power/qcom,coincell-charger.txt (+48, -0)
  11. Documentation/ioctl/ioctl-number.txt (+2, -0)
  12. Documentation/misc-devices/mei/mei.txt (+44, -1)
  13. Documentation/nvmem/nvmem.txt (+152, -0)
  14. Documentation/power/suspend-and-cpuhotplug.txt (+3, -3)
  15. Documentation/trace/coresight.txt (+2, -2)
  16. MAINTAINERS (+10, -0)
  17. arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts (+10, -0)
  18. arch/arm/boot/dts/qcom-pm8941.dtsi (+6, -0)
  19. arch/x86/include/asm/mshyperv.h (+5, -0)
  20. arch/x86/include/uapi/asm/hyperv.h (+2, -0)
  21. arch/x86/kernel/cpu/mshyperv.c (+47, -0)
  22. drivers/Kconfig (+2, -0)
  23. drivers/Makefile (+1, -0)
  24. drivers/auxdisplay/ks0108.c (+60, -37)
  25. drivers/char/misc.c (+7, -10)
  26. drivers/char/nvram.c (+1, -1)
  27. drivers/char/toshiba.c (+1, -1)
  28. drivers/char/xillybus/xillybus_pcie.c (+6, -4)
  29. drivers/extcon/extcon-arizona.c (+83, -18)
  30. drivers/extcon/extcon-gpio.c (+0, -18)
  31. drivers/extcon/extcon-max77843.c (+9, -0)
  32. drivers/extcon/extcon-palmas.c (+116, -18)
  33. drivers/extcon/extcon-rt8973a.c (+0, -1)
  34. drivers/extcon/extcon-sm5502.c (+0, -1)
  35. drivers/extcon/extcon-usb-gpio.c (+1, -0)
  36. drivers/extcon/extcon.c (+35, -13)
  37. drivers/hv/channel.c (+3, -1)
  38. drivers/hv/channel_mgmt.c (+28, -6)
  39. drivers/hv/hv.c (+119, -33)
  40. drivers/hv/hv_balloon.c (+20, -6)
  41. drivers/hv/hv_fcopy.c (+13, -8)
  42. drivers/hv/hv_kvp.c (+3, -0)
  43. drivers/hv/hv_utils_transport.c (+1, -1)
  44. drivers/hv/hyperv_vmbus.h (+15, -1)
  45. drivers/hv/ring_buffer.c (+3, -11)
  46. drivers/hv/vmbus_drv.c (+283, -70)
  47. drivers/hwtracing/coresight/coresight-etm.h (+5, -2)
  48. drivers/hwtracing/coresight/coresight-etm3x.c (+22, -11)
  49. drivers/hwtracing/coresight/coresight-etm4x.c (+23, -14)
  50. drivers/hwtracing/coresight/coresight-etm4x.h (+5, -2)
  51. drivers/hwtracing/coresight/coresight-replicator.c (+1, -12)
  52. drivers/md/dm-ioctl.c (+1, -3)
  53. drivers/misc/Kconfig (+10, -0)
  54. drivers/misc/Makefile (+1, -0)
  55. drivers/misc/ad525x_dpot-i2c.c (+0, -1)
  56. drivers/misc/apds990x.c (+0, -1)
  57. drivers/misc/bh1770glc.c (+0, -1)
  58. drivers/misc/bmp085-i2c.c (+0, -1)
  59. drivers/misc/cxl/sysfs.c (+1, -6)
  60. drivers/misc/ds1682.c (+0, -12)
  61. drivers/misc/eeprom/Kconfig (+0, -13)
  62. drivers/misc/eeprom/Makefile (+0, -1)
  63. drivers/misc/eeprom/at24.c (+0, -1)
  64. drivers/misc/eeprom/eeprom.c (+0, -5)
  65. drivers/misc/eeprom/eeprom_93xx46.c (+0, -14)
  66. drivers/misc/eeprom/max6875.c (+0, -6)
  67. drivers/misc/eeprom/sunxi_sid.c (+0, -156)
  68. drivers/misc/isl29003.c (+0, -1)
  69. drivers/misc/lis3lv02d/lis3lv02d_i2c.c (+0, -1)
  70. drivers/misc/mei/Makefile (+1, -1)
  71. drivers/misc/mei/bus-fixup.c (+306, -0)
  72. drivers/misc/mei/bus.c (+676, -332)
  73. drivers/misc/mei/client.c (+292, -41)
  74. drivers/misc/mei/client.h (+8, -0)
  75. drivers/misc/mei/debugfs.c (+6, -0)
  76. drivers/misc/mei/hbm.c (+314, -16)
  77. drivers/misc/mei/hbm.h (+3, -0)
  78. drivers/misc/mei/hw-me-regs.h (+23, -4)
  79. drivers/misc/mei/hw-me.c (+425, -74)
  80. drivers/misc/mei/hw-me.h (+6, -2)
  81. drivers/misc/mei/hw.h (+129, -5)
  82. drivers/misc/mei/init.c (+2, -1)
  83. drivers/misc/mei/interrupt.c (+26, -1)
  84. drivers/misc/mei/main.c (+96, -0)
  85. drivers/misc/mei/mei_dev.h (+30, -17)
  86. drivers/misc/mei/nfc.c (+0, -415)
  87. drivers/misc/mei/pci-me.c (+15, -17)
  88. drivers/misc/qcom-coincell.c (+152, -0)
  89. drivers/misc/ti-st/st_kim.c (+12, -93)
  90. drivers/misc/ti-st/st_ll.c (+2, -15)
  91. drivers/misc/tsl2550.c (+0, -1)
  92. drivers/misc/vmw_balloon.c (+85, -85)
  93. drivers/misc/vmw_vmci/vmci_host.c (+1, -6)
  94. drivers/nfc/mei_phy.c (+2, -1)
  95. drivers/nvmem/Kconfig (+39, -0)
  96. drivers/nvmem/Makefile (+12, -0)
  97. drivers/nvmem/core.c (+1083, -0)
  98. drivers/nvmem/qfprom.c (+85, -0)
  99. drivers/nvmem/sunxi_sid.c (+171, -0)
  100. drivers/rtc/rtc-ds1374.c (+2, -3)

+ 29 - 0
Documentation/ABI/stable/sysfs-bus-vmbus

@@ -0,0 +1,29 @@
+What:		/sys/bus/vmbus/devices/vmbus_*/id
+Date:		Jul 2009
+KernelVersion:	2.6.31
+Contact:	K. Y. Srinivasan <kys@microsoft.com>
+Description:	The VMBus child_relid of the device's primary channel
+Users:		tools/hv/lsvmbus
+
+What:		/sys/bus/vmbus/devices/vmbus_*/class_id
+Date:		Jul 2009
+KernelVersion:	2.6.31
+Contact:	K. Y. Srinivasan <kys@microsoft.com>
+Description:	The VMBus interface type GUID of the device
+Users:		tools/hv/lsvmbus
+
+What:		/sys/bus/vmbus/devices/vmbus_*/device_id
+Date:		Jul 2009
+KernelVersion:	2.6.31
+Contact:	K. Y. Srinivasan <kys@microsoft.com>
+Description:	The VMBus interface instance GUID of the device
+Users:		tools/hv/lsvmbus
+
+What:		/sys/bus/vmbus/devices/vmbus_*/channel_vp_mapping
+Date:		Jul 2015
+KernelVersion:	4.2.0
+Contact:	K. Y. Srinivasan <kys@microsoft.com>
+Description:	The mapping of which primary/sub channels are bound to which
+		Virtual Processors.
+		Format: <channel's child_relid:the bound cpu's number>
+Users:		tools/hv/lsvmbus

+ 1 - 1
Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x

@@ -112,7 +112,7 @@ KernelVersion:	3.19
 Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
 Description: 	(RW) Mask to apply to all the context ID comparator.

-What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/ctxid_val
+What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/ctxid_pid
 Date:		November 2014
 KernelVersion:	3.19
 Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>

+ 1 - 1
Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x

@@ -249,7 +249,7 @@ KernelVersion:	4.01
 Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
 Description:	(RW) Select which context ID comparator to work with.

-What:		/sys/bus/coresight/devices/<memory_map>.etm/ctxid_val
+What:		/sys/bus/coresight/devices/<memory_map>.etm/ctxid_pid
 Date:		April 2015
 KernelVersion:	4.01
 Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>

+ 0 - 22
Documentation/ABI/testing/sysfs-driver-sunxi-sid

@@ -1,22 +0,0 @@
-What:		/sys/devices/*/<our-device>/eeprom
-Date:		August 2013
-Contact:	Oliver Schinagl <oliver@schinagl.nl>
-Description:	read-only access to the SID (Security-ID) on current
-		A-series SoC's from Allwinner. Currently supports A10, A10s, A13
-		and A20 CPU's. The earlier A1x series of SoCs exports 16 bytes,
-		whereas the newer A20 SoC exposes 512 bytes split into sections.
-		Besides the 16 bytes of SID, there's also an SJTAG area,
-		HDMI-HDCP key and some custom keys. Below a quick overview, for
-		details see the user manual:
-		0x000  128 bit root-key (sun[457]i)
-		0x010  128 bit boot-key (sun7i)
-		0x020   64 bit security-jtag-key (sun7i)
-		0x028   16 bit key configuration (sun7i)
-		0x02b   16 bit custom-vendor-key (sun7i)
-		0x02c  320 bit low general key (sun7i)
-		0x040   32 bit read-control access (sun7i)
-		0x064  224 bit low general key (sun7i)
-		0x080 2304 bit HDCP-key (sun7i)
-		0x1a0  768 bit high general key (sun7i)
-Users:		any user space application which wants to read the SID on
-		Allwinner's A-series of CPU's.

+ 1 - 0
Documentation/devicetree/bindings/arm/coresight.txt

@@ -17,6 +17,7 @@ its hardware characteristcs.
 		- "arm,coresight-tmc", "arm,primecell";
 		- "arm,coresight-funnel", "arm,primecell";
 		- "arm,coresight-etm3x", "arm,primecell";
+		- "arm,coresight-etm4x", "arm,primecell";
 		- "qcom,coresight-replicator1x", "arm,primecell";

 	* reg: physical base address and length of the register

+ 4 - 1
Documentation/devicetree/bindings/extcon/extcon-palmas.txt

@@ -10,8 +10,11 @@ Required Properties:

 Optional Properties:
  - ti,wakeup : To enable the wakeup comparator in probe
- - ti,enable-id-detection: Perform ID detection.
+ - ti,enable-id-detection: Perform ID detection. If id-gpio is specified
+		it performs id-detection using GPIO else using OTG core.
  - ti,enable-vbus-detection: Perform VBUS detection.
+ - id-gpio: gpio for GPIO ID detection. See gpio binding.
+ - debounce-delay-ms: debounce delay for GPIO ID pin in milliseconds.

 palmas-usb {
        compatible = "ti,twl6035-usb", "ti,palmas-usb";

+ 4 - 0
Documentation/devicetree/bindings/misc/allwinner,sunxi-sid.txt → Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt

@@ -4,6 +4,10 @@ Required properties:
 - compatible: "allwinner,sun4i-a10-sid" or "allwinner,sun7i-a20-sid"
 - reg: Should contain registers location and length

+= Data cells =
+Are child nodes of qfprom, bindings of which as described in
+bindings/nvmem/nvmem.txt
+
 Example for sun4i:
 	sid@01c23800 {
 		compatible = "allwinner,sun4i-a10-sid";

+ 80 - 0
Documentation/devicetree/bindings/nvmem/nvmem.txt

@@ -0,0 +1,80 @@
+= NVMEM(Non Volatile Memory) Data Device Tree Bindings =
+
+This binding is intended to represent the location of hardware
+configuration data stored in NVMEMs like eeprom, efuses and so on.
+
+On a significant proportion of boards, the manufacturer has stored
+some data on NVMEM, for the OS to be able to retrieve these information
+and act upon it. Obviously, the OS has to know about where to retrieve
+these data from, and where they are stored on the storage device.
+
+This document is here to document this.
+
+= Data providers =
+Contains bindings specific to provider drivers and data cells as children
+of this node.
+
+Optional properties:
+ read-only: Mark the provider as read only.
+
+= Data cells =
+These are the child nodes of the provider which contain data cell
+information like offset and size in nvmem provider.
+
+Required properties:
+reg:	specifies the offset in byte within the storage device.
+
+Optional properties:
+
+bits:	Is pair of bit location and number of bits, which specifies offset
+	in bit and number of bits within the address range specified by reg property.
+	Offset takes values from 0-7.
+
+For example:
+
+	/* Provider */
+	qfprom: qfprom@00700000 {
+		...
+
+		/* Data cells */
+		tsens_calibration: calib@404 {
+			reg = <0x404 0x10>;
+		};
+
+		tsens_calibration_bckp: calib_bckp@504 {
+			reg = <0x504 0x11>;
+			bits = <6 128>
+		};
+
+		pvs_version: pvs-version@6 {
+			reg = <0x6 0x2>
+			bits = <7 2>
+		};
+
+		speed_bin: speed-bin@c{
+			reg = <0xc 0x1>;
+			bits = <2 3>;
+
+		};
+		...
+	};
+
+= Data consumers =
+Are device nodes which consume nvmem data cells/providers.
+
+Required-properties:
+nvmem-cells: list of phandle to the nvmem data cells.
+nvmem-cell-names: names for the each nvmem-cells specified. Required if
+	nvmem-cells is used.
+
+Optional-properties:
+nvmem	: list of phandles to nvmem providers.
+nvmem-names: names for the each nvmem provider. required if nvmem is used.
+
+For example:
+
+	tsens {
+		...
+		nvmem-cells = <&tsens_calibration>;
+		nvmem-cell-names = "calibration";
+	};

+ 35 - 0
Documentation/devicetree/bindings/nvmem/qfprom.txt

@@ -0,0 +1,35 @@
+= Qualcomm QFPROM device tree bindings =
+
+This binding is intended to represent QFPROM which is found in most QCOM SOCs.
+
+Required properties:
+- compatible: should be "qcom,qfprom"
+- reg: Should contain registers location and length
+
+= Data cells =
+Are child nodes of qfprom, bindings of which as described in
+bindings/nvmem/nvmem.txt
+
+Example:
+
+	qfprom: qfprom@00700000 {
+		compatible 	= "qcom,qfprom";
+		reg		= <0x00700000 0x8000>;
+		...
+		/* Data cells */
+		tsens_calibration: calib@404 {
+			reg = <0x4404 0x10>;
+		};
+	};
+
+
+= Data consumers =
+Are device nodes which consume nvmem data cells.
+
+For example:
+
+	tsens {
+		...
+		nvmem-cells = <&tsens_calibration>;
+		nvmem-cell-names = "calibration";
+	};

+ 48 - 0
Documentation/devicetree/bindings/power/qcom,coincell-charger.txt

@@ -0,0 +1,48 @@
+Qualcomm Coincell Charger:
+
+The hardware block controls charging for a coincell or capacitor that is
+used to provide power backup for certain features of the power management
+IC (PMIC)
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: must be: "qcom,pm8941-coincell"
+
+- reg:
+	Usage: required
+	Value type: <u32>
+	Definition: base address of the coincell charger registers
+
+- qcom,rset-ohms:
+	Usage: required
+	Value type: <u32>
+	Definition: resistance (in ohms) for current-limiting resistor
+		must be one of: 800, 1200, 1700, 2100
+
+- qcom,vset-millivolts:
+	Usage: required
+	Value type: <u32>
+	Definition: voltage (in millivolts) to apply for charging
+		must be one of: 2500, 3000, 3100, 3200
+
+- qcom,charger-disable:
+	Usage: optional
+	Value type: <boolean>
+	Definition: definining this property disables charging
+
+This charger is a sub-node of one of the 8941 PMIC blocks, and is specified
+as a child node in DTS of that node.  See ../mfd/qcom,spmi-pmic.txt and
+../mfd/qcom-pm8xxx.txt
+
+Example:
+
+	pm8941@0 {
+		coincell@2800 {
+			compatible = "qcom,pm8941-coincell";
+			reg = <0x2800>;
+
+			qcom,rset-ohms = <2100>;
+			qcom,vset-millivolts = <3000>;
+		};
+	};

+ 2 - 0
Documentation/ioctl/ioctl-number.txt

@@ -124,6 +124,8 @@ Code  Seq#(hex)	Include File		Comments
 'H'	00-7F	linux/hiddev.h		conflict!
 'H'	00-0F	linux/hidraw.h		conflict!
 'H'	01	linux/mei.h		conflict!
+'H'	02	linux/mei.h		conflict!
+'H'	03	linux/mei.h		conflict!
 'H'	00-0F	sound/asound.h		conflict!
 'H'	20-40	sound/asound_fm.h	conflict!
 'H'	80-8F	sound/sfnt_info.h	conflict!

+ 44 - 1
Documentation/misc-devices/mei/mei.txt

@@ -96,7 +96,7 @@ A code snippet for an application communicating with Intel AMTHI client:
 IOCTL
 =====

-The Intel MEI Driver supports the following IOCTL command:
+The Intel MEI Driver supports the following IOCTL commands:
 	IOCTL_MEI_CONNECT_CLIENT	Connect to firmware Feature (client).

 	usage:
@@ -125,6 +125,49 @@ The Intel MEI Driver supports the following IOCTL command:
         data that can be sent or received. (e.g. if MTU=2K, can send
         requests up to bytes 2k and received responses up to 2k bytes).

+	IOCTL_MEI_NOTIFY_SET: enable or disable event notifications
+
+	Usage:
+		uint32_t enable;
+		ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable);
+
+	Inputs:
+		uint32_t enable = 1;
+		or
+		uint32_t enable[disable] = 0;
+
+	Error returns:
+		EINVAL	Wrong IOCTL Number
+		ENODEV	Device  is not initialized or the client not connected
+		ENOMEM	Unable to allocate memory to client internal data.
+		EFAULT	Fatal Error (e.g. Unable to access user input data)
+		EOPNOTSUPP if the device doesn't support the feature
+
+	Notes:
+	The client must be connected in order to enable notification events
+
+
+	IOCTL_MEI_NOTIFY_GET : retrieve event
+
+	Usage:
+		uint32_t event;
+		ioctl(fd, IOCTL_MEI_NOTIFY_GET, &event);
+
+	Outputs:
+		1 - if an event is pending
+		0 - if there is no even pending
+
+	Error returns:
+		EINVAL	Wrong IOCTL Number
+		ENODEV	Device is not initialized or the client not connected
+		ENOMEM	Unable to allocate memory to client internal data.
+		EFAULT	Fatal Error (e.g. Unable to access user input data)
+		EOPNOTSUPP if the device doesn't support the feature
+
+	Notes:
+	The client must be connected and event notification has to be enabled
+	in order to receive an event
+

 Intel ME Applications
 =====================

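For illustration, a minimal user-space sketch (not part of this commit) of the notification flow documented above; it assumes fd is a /dev/mei descriptor on which IOCTL_MEI_CONNECT_CLIENT has already succeeded:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/mei.h>

	/* Enable notifications on a connected client, then query whether an
	 * event is pending; a real application would typically poll(2) the
	 * descriptor in between instead of querying immediately. */
	static int mei_check_event(int fd)
	{
		uint32_t enable = 1;
		uint32_t event = 0;

		if (ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable) < 0)
			return -1;	/* e.g. EOPNOTSUPP if unsupported */

		if (ioctl(fd, IOCTL_MEI_NOTIFY_GET, &event) < 0)
			return -1;

		return event;		/* 1 - event pending, 0 - none */
	}
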
+ 152 - 0
Documentation/nvmem/nvmem.txt

@@ -0,0 +1,152 @@
+			    NVMEM SUBSYSTEM
+	  Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+This document explains the NVMEM Framework along with the APIs provided,
+and how to use it.
+
+1. Introduction
+===============
+*NVMEM* is the abbreviation for Non Volatile Memory layer. It is used to
+retrieve configuration of SOC or Device specific data from non volatile
+memories like eeprom, efuses and so on.
+
+Before this framework existed, NVMEM drivers like eeprom were stored in
+drivers/misc, where they all had to duplicate pretty much the same code to
+register a sysfs file, allow in-kernel users to access the content of the
+devices they were driving, etc.
+
+This was also a problem as far as other in-kernel users were involved, since
+the solutions used were pretty much different from one driver to another, there
+was a rather big abstraction leak.
+
+This framework aims at solve these problems. It also introduces DT
+representation for consumer devices to go get the data they require (MAC
+Addresses, SoC/Revision ID, part numbers, and so on) from the NVMEMs. This
+framework is based on regmap, so that most of the abstraction available in
+regmap can be reused, across multiple types of buses.
+
+NVMEM Providers
++++++++++++++++
+
+NVMEM provider refers to an entity that implements methods to initialize, read
+and write the non-volatile memory.
+
+2. Registering/Unregistering the NVMEM provider
+===============================================
+
+A NVMEM provider can register with NVMEM core by supplying relevant
+nvmem configuration to nvmem_register(), on success core would return a valid
+nvmem_device pointer.
+
+nvmem_unregister(nvmem) is used to unregister a previously registered provider.
+
+For example, a simple qfprom case:
+
+static struct nvmem_config econfig = {
+	.name = "qfprom",
+	.owner = THIS_MODULE,
+};
+
+static int qfprom_probe(struct platform_device *pdev)
+{
+	...
+	econfig.dev = &pdev->dev;
+	nvmem = nvmem_register(&econfig);
+	...
+}
+
+It is mandatory that the NVMEM provider has a regmap associated with its
+struct device. Failure to do would return error code from nvmem_register().
+
+NVMEM Consumers
++++++++++++++++
+
+NVMEM consumers are the entities which make use of the NVMEM provider to
+read from and to NVMEM.
+
+3. NVMEM cell based consumer APIs
+=================================
+
+NVMEM cells are the data entries/fields in the NVMEM.
+The NVMEM framework provides 3 APIs to read/write NVMEM cells.
+
+struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name);
+struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name);
+
+void nvmem_cell_put(struct nvmem_cell *cell);
+void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
+
+void *nvmem_cell_read(struct nvmem_cell *cell, ssize_t *len);
+int nvmem_cell_write(struct nvmem_cell *cell, void *buf, ssize_t len);
+
+*nvmem_cell_get() apis will get a reference to nvmem cell for a given id,
+and nvmem_cell_read/write() can then read or write to the cell.
+Once the usage of the cell is finished the consumer should call *nvmem_cell_put()
+to free all the allocation memory for the cell.
+
+4. Direct NVMEM device based consumer APIs
+==========================================
+
+In some instances it is necessary to directly read/write the NVMEM.
+To facilitate such consumers NVMEM framework provides below apis.
+
+struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
+struct nvmem_device *devm_nvmem_device_get(struct device *dev,
+					   const char *name);
+void nvmem_device_put(struct nvmem_device *nvmem);
+int nvmem_device_read(struct nvmem_device *nvmem, unsigned int offset,
+		      size_t bytes, void *buf);
+int nvmem_device_write(struct nvmem_device *nvmem, unsigned int offset,
+		       size_t bytes, void *buf);
+int nvmem_device_cell_read(struct nvmem_device *nvmem,
+			   struct nvmem_cell_info *info, void *buf);
+int nvmem_device_cell_write(struct nvmem_device *nvmem,
+			    struct nvmem_cell_info *info, void *buf);
+
+Before the consumers can read/write NVMEM directly, it should get hold
+of nvmem_controller from one of the *nvmem_device_get() api.
+
+The difference between these apis and cell based apis is that these apis always
+take nvmem_device as parameter.
+
+5. Releasing a reference to the NVMEM
+=====================================
+
+When a consumers no longer needs the NVMEM, it has to release the reference
+to the NVMEM it has obtained using the APIs mentioned in the above section.
+The NVMEM framework provides 2 APIs to release a reference to the NVMEM.
+
+void nvmem_cell_put(struct nvmem_cell *cell);
+void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
+void nvmem_device_put(struct nvmem_device *nvmem);
+void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem);
+
+Both these APIs are used to release a reference to the NVMEM and
+devm_nvmem_cell_put and devm_nvmem_device_put destroys the devres associated
+with this NVMEM.
+
+Userspace
++++++++++
+
+6. Userspace binary interface
+==============================
+
+Userspace can read/write the raw NVMEM file located at
+/sys/bus/nvmem/devices/*/nvmem
+
+ex:
+
+hexdump /sys/bus/nvmem/devices/qfprom0/nvmem
+
+0000000 0000 0000 0000 0000 0000 0000 0000 0000
+*
+00000a0 db10 2240 0000 e000 0c00 0c00 0000 0c00
+0000000 0000 0000 0000 0000 0000 0000 0000 0000
+...
+*
+0001000
+
+7. DeviceTree Binding
+=====================
+
+See Documentation/devicetree/bindings/nvmem/nvmem.txt

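A minimal, hypothetical consumer sketch (not part of this commit) of the cell-based APIs documented above, for a platform driver whose DT node carries nvmem-cells/nvmem-cell-names = "calibration" as in the consumer example; it assumes the read buffer is kmalloc'ed by the core, as drivers/nvmem/core.c added here does:

	#include <linux/err.h>
	#include <linux/module.h>
	#include <linux/nvmem-consumer.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct nvmem_cell *cell;
		ssize_t len;
		u8 *calib;

		/* Resolved through the "nvmem-cells"/"nvmem-cell-names" properties. */
		cell = devm_nvmem_cell_get(&pdev->dev, "calibration");
		if (IS_ERR(cell))
			return PTR_ERR(cell);

		/* nvmem_cell_read() hands back a freshly allocated buffer. */
		calib = nvmem_cell_read(cell, &len);
		if (IS_ERR(calib))
			return PTR_ERR(calib);

		dev_info(&pdev->dev, "read %zd calibration bytes\n", len);
		/* ... program the hardware from calib[0..len-1] ... */

		kfree(calib);
		return 0;
	}
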
+ 3 - 3
Documentation/power/suspend-and-cpuhotplug.txt

@@ -72,7 +72,7 @@ More details follow:
                                         |
                                         v
                            Disable regular cpu hotplug
-                        by setting cpu_hotplug_disabled=1
+                        by increasing cpu_hotplug_disabled
                                         |
                                         v
                             Release cpu_add_remove_lock
@@ -89,7 +89,7 @@ Resuming back is likewise, with the counterparts being (in the order of
 execution during resume):
 * enable_nonboot_cpus() which involves:
    |  Acquire cpu_add_remove_lock
-   |  Reset cpu_hotplug_disabled to 0, thereby enabling regular cpu hotplug
+   |  Decrease cpu_hotplug_disabled, thereby enabling regular cpu hotplug
    |  Call _cpu_up() [for all those cpus in the frozen_cpus mask, in a loop]
    |  Release cpu_add_remove_lock
    v
@@ -120,7 +120,7 @@ after the entire cycle is complete (i.e., suspend + resume).
                            Acquire cpu_add_remove_lock
                                         |
                                         v
-                          If cpu_hotplug_disabled is 1
+                          If cpu_hotplug_disabled > 0
                                  return gracefully
                                         |
                                         |

+ 2 - 2
Documentation/trace/coresight.txt

@@ -15,7 +15,7 @@ HW assisted tracing is becoming increasingly useful when dealing with systems
 that have many SoCs and other components like GPU and DMA engines.  ARM has
 developed a HW assisted tracing solution by means of different components, each
 being added to a design at synthesis time to cater to specific tracing needs.
-Compoments are generally categorised as source, link and sinks and are
+Components are generally categorised as source, link and sinks and are
 (usually) discovered using the AMBA bus.

 "Sources" generate a compressed stream representing the processor instruction
@@ -138,7 +138,7 @@ void coresight_unregister(struct coresight_device *csdev);

 The registering function is taking a "struct coresight_device *csdev" and
 register the device with the core framework.  The unregister function takes
-a reference to a "strut coresight_device", obtained at registration time.
+a reference to a "struct coresight_device", obtained at registration time.

 If everything goes well during the registration process the new devices will
 show up under /sys/bus/coresight/devices, as showns here for a TC2 platform:

+ 10 - 0
MAINTAINERS

@@ -4966,6 +4966,7 @@ F:	drivers/scsi/storvsc_drv.c
 F:	drivers/video/fbdev/hyperv_fb.c
 F:	include/linux/hyperv.h
 F:	tools/hv/
+F:	Documentation/ABI/stable/sysfs-bus-vmbus

 I2C OVER PARALLEL PORT
 M:	Jean Delvare <jdelvare@suse.com>
@@ -7298,6 +7299,15 @@ S:	Supported
 F:	drivers/block/nvme*
 F:	include/linux/nvme.h

+NVMEM FRAMEWORK
+M:	Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+M:	Maxime Ripard <maxime.ripard@free-electrons.com>
+S:	Maintained
+F:	drivers/nvmem/
+F:	Documentation/devicetree/bindings/nvmem/
+F:	include/linux/nvmem-consumer.h
+F:	include/linux/nvmem-provider.h
+
 NXP-NCI NFC DRIVER
 M:	Clément Perrochaud <clement.perrochaud@effinnov.com>
 R:	Charles Gorand <charles.gorand@effinnov.com>

+ 10 - 0
arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts

@@ -17,3 +17,13 @@
 		status = "ok";
 	};
 };
+
+&spmi_bus {
+	pm8941@0 {
+		coincell@2800 {
+			status = "ok";
+			qcom,rset-ohms = <2100>;
+			qcom,vset-millivolts = <3000>;
+		};
+	};
+};

+ 6 - 0
arch/arm/boot/dts/qcom-pm8941.dtsi

@@ -125,6 +125,12 @@
 			interrupts = <0x0 0x36 0x0 IRQ_TYPE_EDGE_RISING>;
 			qcom,external-resistor-micro-ohms = <10000>;
 		};
+
+		coincell@2800 {
+			compatible = "qcom,pm8941-coincell";
+			reg = <0x2800>;
+			status = "disabled";
+		};
 	};

 	usid1: pm8941@1 {

+ 5 - 0
arch/x86/include/asm/mshyperv.h

@@ -7,6 +7,7 @@

 struct ms_hyperv_info {
 	u32 features;
+	u32 misc_features;
 	u32 hints;
 };

@@ -20,4 +21,8 @@ void hyperv_vector_handler(struct pt_regs *regs);
 void hv_setup_vmbus_irq(void (*handler)(void));
 void hv_remove_vmbus_irq(void);

+void hv_setup_kexec_handler(void (*handler)(void));
+void hv_remove_kexec_handler(void);
+void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
+void hv_remove_crash_handler(void);
 #endif

+ 2 - 0
arch/x86/include/uapi/asm/hyperv.h

@@ -27,6 +27,8 @@
 #define HV_X64_MSR_VP_RUNTIME_AVAILABLE		(1 << 0)
 /* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/
 #define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE	(1 << 1)
+/* Partition reference TSC MSR is available */
+#define HV_X64_MSR_REFERENCE_TSC_AVAILABLE              (1 << 9)

 /* A partition's reference time stamp counter (TSC) page */
 #define HV_X64_MSR_REFERENCE_TSC		0x40000021

+ 47 - 0
arch/x86/kernel/cpu/mshyperv.c

@@ -18,6 +18,7 @@
 #include <linux/efi.h>
 #include <linux/efi.h>
 #include <linux/interrupt.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irq.h>
+#include <linux/kexec.h>
 #include <asm/processor.h>
 #include <asm/processor.h>
 #include <asm/hypervisor.h>
 #include <asm/hypervisor.h>
 #include <asm/hyperv.h>
 #include <asm/hyperv.h>
@@ -28,10 +29,14 @@
 #include <asm/i8259.h>
 #include <asm/i8259.h>
 #include <asm/apic.h>
 #include <asm/apic.h>
 #include <asm/timer.h>
 #include <asm/timer.h>
+#include <asm/reboot.h>
 
 
 struct ms_hyperv_info ms_hyperv;
 struct ms_hyperv_info ms_hyperv;
 EXPORT_SYMBOL_GPL(ms_hyperv);
 EXPORT_SYMBOL_GPL(ms_hyperv);
 
 
+static void (*hv_kexec_handler)(void);
+static void (*hv_crash_handler)(struct pt_regs *regs);
+
 #if IS_ENABLED(CONFIG_HYPERV)
 #if IS_ENABLED(CONFIG_HYPERV)
 static void (*vmbus_handler)(void);
 static void (*vmbus_handler)(void);
 
 
@@ -67,8 +72,47 @@ void hv_remove_vmbus_irq(void)
 }
 }
 EXPORT_SYMBOL_GPL(hv_setup_vmbus_irq);
 EXPORT_SYMBOL_GPL(hv_setup_vmbus_irq);
 EXPORT_SYMBOL_GPL(hv_remove_vmbus_irq);
 EXPORT_SYMBOL_GPL(hv_remove_vmbus_irq);
+
+void hv_setup_kexec_handler(void (*handler)(void))
+{
+	hv_kexec_handler = handler;
+}
+EXPORT_SYMBOL_GPL(hv_setup_kexec_handler);
+
+void hv_remove_kexec_handler(void)
+{
+	hv_kexec_handler = NULL;
+}
+EXPORT_SYMBOL_GPL(hv_remove_kexec_handler);
+
+void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs))
+{
+	hv_crash_handler = handler;
+}
+EXPORT_SYMBOL_GPL(hv_setup_crash_handler);
+
+void hv_remove_crash_handler(void)
+{
+	hv_crash_handler = NULL;
+}
+EXPORT_SYMBOL_GPL(hv_remove_crash_handler);
 #endif
 #endif
 
 
+static void hv_machine_shutdown(void)
+{
+	if (kexec_in_progress && hv_kexec_handler)
+		hv_kexec_handler();
+	native_machine_shutdown();
+}
+
+static void hv_machine_crash_shutdown(struct pt_regs *regs)
+{
+	if (hv_crash_handler)
+		hv_crash_handler(regs);
+	native_machine_crash_shutdown(regs);
+}
+
+
 static uint32_t  __init ms_hyperv_platform(void)
 static uint32_t  __init ms_hyperv_platform(void)
 {
 {
 	u32 eax;
 	u32 eax;
@@ -114,6 +158,7 @@ static void __init ms_hyperv_init_platform(void)
 	 * Extract the features and hints
 	 * Extract the features and hints
 	 */
 	 */
 	ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
 	ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
+	ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
 	ms_hyperv.hints    = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
 	ms_hyperv.hints    = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
 
 
 	printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
 	printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
@@ -141,6 +186,8 @@ static void __init ms_hyperv_init_platform(void)
 	no_timer_check = 1;
 	no_timer_check = 1;
 #endif
 #endif
 
 
+	machine_ops.shutdown = hv_machine_shutdown;
+	machine_ops.crash_shutdown = hv_machine_crash_shutdown;
 }
 }
 
 
 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {

+ 2 - 0
drivers/Kconfig

@@ -184,4 +184,6 @@ source "drivers/android/Kconfig"

 source "drivers/nvdimm/Kconfig"

+source "drivers/nvmem/Kconfig"
+
 endmenu

+ 1 - 0
drivers/Makefile

@@ -165,3 +165,4 @@ obj-$(CONFIG_RAS)		+= ras/
 obj-$(CONFIG_THUNDERBOLT)	+= thunderbolt/
 obj-$(CONFIG_CORESIGHT)		+= hwtracing/coresight/
 obj-$(CONFIG_ANDROID)		+= android/
+obj-$(CONFIG_NVMEM)		+= nvmem/

+ 60 - 37
drivers/auxdisplay/ks0108.c

@@ -23,6 +23,8 @@
  *
  *
  */
  */
 
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/kernel.h>
@@ -90,17 +92,19 @@ void ks0108_displaystate(unsigned char state)
 
 
 void ks0108_startline(unsigned char startline)
 void ks0108_startline(unsigned char startline)
 {
 {
-	ks0108_writedata(min(startline,(unsigned char)63) | bit(6) | bit(7));
+	ks0108_writedata(min_t(unsigned char, startline, 63) | bit(6) |
+			 bit(7));
 }
 }
 
 
 void ks0108_address(unsigned char address)
 void ks0108_address(unsigned char address)
 {
 {
-	ks0108_writedata(min(address,(unsigned char)63) | bit(6));
+	ks0108_writedata(min_t(unsigned char, address, 63) | bit(6));
 }
 }
 
 
 void ks0108_page(unsigned char page)
 void ks0108_page(unsigned char page)
 {
 {
-	ks0108_writedata(min(page,(unsigned char)7) | bit(3) | bit(4) | bit(5) | bit(7));
+	ks0108_writedata(min_t(unsigned char, page, 7) | bit(3) | bit(4) |
+			 bit(5) | bit(7));
 }
 }
 
 
 EXPORT_SYMBOL_GPL(ks0108_writedata);
 EXPORT_SYMBOL_GPL(ks0108_writedata);
@@ -121,52 +125,71 @@ unsigned char ks0108_isinited(void)
 }
 }
 EXPORT_SYMBOL_GPL(ks0108_isinited);
 EXPORT_SYMBOL_GPL(ks0108_isinited);
 
 
-/*
- * Module Init & Exit
- */
-
-static int __init ks0108_init(void)
+static void ks0108_parport_attach(struct parport *port)
 {
 {
-	int result;
-	int ret = -EINVAL;
-
-	ks0108_parport = parport_find_base(ks0108_port);
-	if (ks0108_parport == NULL) {
-		printk(KERN_ERR KS0108_NAME ": ERROR: "
-			"parport didn't find %i port\n", ks0108_port);
-		goto none;
-	}
-
-	ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME,
-		NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);
-	if (ks0108_pardevice == NULL) {
-		printk(KERN_ERR KS0108_NAME ": ERROR: "
-			"parport didn't register new device\n");
-		goto none;
+	struct pardev_cb ks0108_cb;
+
+	if (port->base != ks0108_port)
+		return;
+
+	memset(&ks0108_cb, 0, sizeof(ks0108_cb));
+	ks0108_cb.flags = PARPORT_DEV_EXCL;
+	ks0108_pardevice = parport_register_dev_model(port, KS0108_NAME,
+						      &ks0108_cb, 0);
+	if (!ks0108_pardevice) {
+		pr_err("ERROR: parport didn't register new device\n");
+		return;
 	}
 	}
-
-	result = parport_claim(ks0108_pardevice);
-	if (result != 0) {
-		printk(KERN_ERR KS0108_NAME ": ERROR: "
-			"can't claim %i parport, maybe in use\n", ks0108_port);
-		ret = result;
-		goto registered;
+	if (parport_claim(ks0108_pardevice)) {
+		pr_err("could not claim access to parport %i. Aborting.\n",
+		       ks0108_port);
+		goto err_unreg_device;
 	}
 	}
 
 
+	ks0108_parport = port;
 	ks0108_inited = 1;
 	ks0108_inited = 1;
-	return 0;
+	return;
 
 
-registered:
+err_unreg_device:
 	parport_unregister_device(ks0108_pardevice);
 	parport_unregister_device(ks0108_pardevice);
-
-none:
-	return ret;
+	ks0108_pardevice = NULL;
 }
 }
 
 
-static void __exit ks0108_exit(void)
+static void ks0108_parport_detach(struct parport *port)
 {
 {
+	if (port->base != ks0108_port)
+		return;
+
+	if (!ks0108_pardevice) {
+		pr_err("%s: already unregistered.\n", KS0108_NAME);
+		return;
+	}
+
 	parport_release(ks0108_pardevice);
 	parport_release(ks0108_pardevice);
 	parport_unregister_device(ks0108_pardevice);
 	parport_unregister_device(ks0108_pardevice);
+	ks0108_pardevice = NULL;
+	ks0108_parport = NULL;
+}
+
+/*
+ * Module Init & Exit
+ */
+
+static struct parport_driver ks0108_parport_driver = {
+	.name = "ks0108",
+	.match_port = ks0108_parport_attach,
+	.detach = ks0108_parport_detach,
+	.devmodel = true,
+};
+
+static int __init ks0108_init(void)
+{
+	return parport_register_driver(&ks0108_parport_driver);
+}
+
+static void __exit ks0108_exit(void)
+{
+	parport_unregister_driver(&ks0108_parport_driver);
 }
 }
 
 
 module_init(ks0108_init);
 module_init(ks0108_init);

+ 7 - 10
drivers/char/misc.c

@@ -243,17 +243,15 @@ int misc_register(struct miscdevice * misc)
  *	@misc: device to unregister
  *	@misc: device to unregister
  *
  *
  *	Unregister a miscellaneous device that was previously
  *	Unregister a miscellaneous device that was previously
- *	successfully registered with misc_register(). Success
- *	is indicated by a zero return, a negative errno code
- *	indicates an error.
+ *	successfully registered with misc_register().
  */
  */
 
 
-int misc_deregister(struct miscdevice *misc)
+void misc_deregister(struct miscdevice *misc)
 {
 {
 	int i = DYNAMIC_MINORS - misc->minor - 1;
 	int i = DYNAMIC_MINORS - misc->minor - 1;
 
 
 	if (WARN_ON(list_empty(&misc->list)))
 	if (WARN_ON(list_empty(&misc->list)))
-		return -EINVAL;
+		return;
 
 
 	mutex_lock(&misc_mtx);
 	mutex_lock(&misc_mtx);
 	list_del(&misc->list);
 	list_del(&misc->list);
@@ -261,7 +259,6 @@ int misc_deregister(struct miscdevice *misc)
 	if (i < DYNAMIC_MINORS && i >= 0)
 	if (i < DYNAMIC_MINORS && i >= 0)
 		clear_bit(i, misc_minors);
 		clear_bit(i, misc_minors);
 	mutex_unlock(&misc_mtx);
 	mutex_unlock(&misc_mtx);
-	return 0;
 }
 }
 
 
 EXPORT_SYMBOL(misc_register);
 EXPORT_SYMBOL(misc_register);
@@ -281,10 +278,9 @@ static char *misc_devnode(struct device *dev, umode_t *mode)
 static int __init misc_init(void)
 static int __init misc_init(void)
 {
 {
 	int err;
 	int err;
+	struct proc_dir_entry *ret;
 
 
-#ifdef CONFIG_PROC_FS
-	proc_create("misc", 0, NULL, &misc_proc_fops);
-#endif
+	ret = proc_create("misc", 0, NULL, &misc_proc_fops);
 	misc_class = class_create(THIS_MODULE, "misc");
 	misc_class = class_create(THIS_MODULE, "misc");
 	err = PTR_ERR(misc_class);
 	err = PTR_ERR(misc_class);
 	if (IS_ERR(misc_class))
 	if (IS_ERR(misc_class))
@@ -300,7 +296,8 @@ fail_printk:
 	printk("unable to get major %d for misc devices\n", MISC_MAJOR);
 	printk("unable to get major %d for misc devices\n", MISC_MAJOR);
 	class_destroy(misc_class);
 	class_destroy(misc_class);
 fail_remove:
 fail_remove:
-	remove_proc_entry("misc", NULL);
+	if (ret)
+		remove_proc_entry("misc", NULL);
 	return err;
 	return err;
 }
 }
 subsys_initcall(misc_init);
 subsys_initcall(misc_init);

+ 1 - 1
drivers/char/nvram.c

@@ -702,7 +702,7 @@ static void atari_proc_infos(unsigned char *nvram, struct seq_file *seq,
 		seq_printf(seq, "%ds%s\n", nvram[10],
 		seq_printf(seq, "%ds%s\n", nvram[10],
 		    nvram[10] < 8 ? ", no memory test" : "");
 		    nvram[10] < 8 ? ", no memory test" : "");
 
 
-	vmode = (nvram[14] << 8) || nvram[15];
+	vmode = (nvram[14] << 8) | nvram[15];
 	seq_printf(seq,
 	seq_printf(seq,
 	    "Video mode       : %s colors, %d columns, %s %s monitor\n",
 	    "Video mode       : %s colors, %d columns, %s %s monitor\n",
 	    colors[vmode & 7],
 	    colors[vmode & 7],

+ 1 - 1
drivers/char/toshiba.c

@@ -430,7 +430,7 @@ static int tosh_probe(void)
 	int i,major,minor,day,year,month,flag;
 	int i,major,minor,day,year,month,flag;
 	unsigned char signature[7] = { 0x54,0x4f,0x53,0x48,0x49,0x42,0x41 };
 	unsigned char signature[7] = { 0x54,0x4f,0x53,0x48,0x49,0x42,0x41 };
 	SMMRegisters regs;
 	SMMRegisters regs;
-	void __iomem *bios = ioremap_cache(0xf0000, 0x10000);
+	void __iomem *bios = ioremap(0xf0000, 0x10000);
 
 
 	if (!bios)
 	if (!bios)
 		return -ENOMEM;
 		return -ENOMEM;

+ 6 - 4
drivers/char/xillybus/xillybus_pcie.c

@@ -193,14 +193,16 @@ static int xilly_probe(struct pci_dev *pdev,
 	}
 	}
 
 
 	/*
 	/*
-	 * In theory, an attempt to set the DMA mask to 64 and dma_using_dac=1
-	 * is the right thing. But some unclever PCIe drivers report it's OK
-	 * when the hardware drops those 64-bit PCIe packets. So trust
-	 * nobody and use 32 bits DMA addressing in any case.
+	 * Some (old and buggy?) hardware drops 64-bit addressed PCIe packets,
+	 * even when the PCIe driver claims that a 64-bit mask is OK. On the
+	 * other hand, on some architectures, 64-bit addressing is mandatory.
+	 * So go for the 64-bit mask only when failing is the other option.
 	 */
 	 */
 
 
 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
 		endpoint->dma_using_dac = 0;
 		endpoint->dma_using_dac = 0;
+	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		endpoint->dma_using_dac = 1;
 	} else {
 	} else {
 		dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n");
 		dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n");
 		return -ENODEV;
 		return -ENODEV;

+ 83 - 18
drivers/extcon/extcon-arizona.c

@@ -20,10 +20,12 @@
 #include <linux/slab.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/interrupt.h>
 #include <linux/err.h>
 #include <linux/err.h>
+#include <linux/gpio/consumer.h>
 #include <linux/gpio.h>
 #include <linux/gpio.h>
 #include <linux/input.h>
 #include <linux/input.h>
 #include <linux/platform_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_runtime.h>
+#include <linux/property.h>
 #include <linux/regulator/consumer.h>
 #include <linux/regulator/consumer.h>
 #include <linux/extcon.h>
 #include <linux/extcon.h>
 
 
@@ -46,6 +48,9 @@
 #define HPDET_DEBOUNCE 500
 #define HPDET_DEBOUNCE 500
 #define DEFAULT_MICD_TIMEOUT 2000
 #define DEFAULT_MICD_TIMEOUT 2000
 
 
+#define MICD_DBTIME_TWO_READINGS 2
+#define MICD_DBTIME_FOUR_READINGS 4
+
 #define MICD_LVL_1_TO_7 (ARIZONA_MICD_LVL_1 | ARIZONA_MICD_LVL_2 | \
 #define MICD_LVL_1_TO_7 (ARIZONA_MICD_LVL_1 | ARIZONA_MICD_LVL_2 | \
 			 ARIZONA_MICD_LVL_3 | ARIZONA_MICD_LVL_4 | \
 			 ARIZONA_MICD_LVL_3 | ARIZONA_MICD_LVL_4 | \
 			 ARIZONA_MICD_LVL_5 | ARIZONA_MICD_LVL_6 | \
 			 ARIZONA_MICD_LVL_5 | ARIZONA_MICD_LVL_6 | \
@@ -94,6 +99,8 @@ struct arizona_extcon_info {
 	int hpdet_ip_version;
 	int hpdet_ip_version;
 
 
 	struct extcon_dev *edev;
 	struct extcon_dev *edev;
+
+	struct gpio_desc *micd_pol_gpio;
 };
 };
 
 
 static const struct arizona_micd_config micd_default_modes[] = {
 static const struct arizona_micd_config micd_default_modes[] = {
@@ -204,6 +211,10 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
 	if (arizona->pdata.micd_pol_gpio > 0)
 	if (arizona->pdata.micd_pol_gpio > 0)
 		gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
 		gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
 					info->micd_modes[mode].gpio);
 					info->micd_modes[mode].gpio);
+	else
+		gpiod_set_value_cansleep(info->micd_pol_gpio,
+					 info->micd_modes[mode].gpio);
+
 	regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
 	regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
 			   ARIZONA_MICD_BIAS_SRC_MASK,
 			   ARIZONA_MICD_BIAS_SRC_MASK,
 			   info->micd_modes[mode].bias <<
 			   info->micd_modes[mode].bias <<
@@ -757,10 +768,11 @@ static void arizona_micd_timeout_work(struct work_struct *work)
 	mutex_lock(&info->lock);
 	mutex_lock(&info->lock);
 
 
 	dev_dbg(info->arizona->dev, "MICD timed out, reporting HP\n");
 	dev_dbg(info->arizona->dev, "MICD timed out, reporting HP\n");
-	arizona_identify_headphone(info);
 
 
 	info->detecting = false;
 	info->detecting = false;
 
 
+	arizona_identify_headphone(info);
+
 	arizona_stop_mic(info);
 	arizona_stop_mic(info);
 
 
 	mutex_unlock(&info->lock);
 	mutex_unlock(&info->lock);
@@ -820,12 +832,18 @@ static void arizona_micd_detect(struct work_struct *work)
 	/* Due to jack detect this should never happen */
 	/* Due to jack detect this should never happen */
 	if (!(val & ARIZONA_MICD_STS)) {
 	if (!(val & ARIZONA_MICD_STS)) {
 		dev_warn(arizona->dev, "Detected open circuit\n");
 		dev_warn(arizona->dev, "Detected open circuit\n");
+		info->mic = false;
+		arizona_stop_mic(info);
 		info->detecting = false;
 		info->detecting = false;
+		arizona_identify_headphone(info);
 		goto handled;
 		goto handled;
 	}
 	}
 
 
 	/* If we got a high impedence we should have a headset, report it. */
 	/* If we got a high impedence we should have a headset, report it. */
 	if (info->detecting && (val & ARIZONA_MICD_LVL_8)) {
 	if (info->detecting && (val & ARIZONA_MICD_LVL_8)) {
+		info->mic = true;
+		info->detecting = false;
+
 		arizona_identify_headphone(info);
 		arizona_identify_headphone(info);
 
 
 		ret = extcon_set_cable_state_(info->edev,
 		ret = extcon_set_cable_state_(info->edev,
@@ -841,8 +859,6 @@ static void arizona_micd_detect(struct work_struct *work)
 				ret);
 				ret);
 		}
 		}
 
 
-		info->mic = true;
-		info->detecting = false;
 		goto handled;
 		goto handled;
 	}
 	}
 
 
@@ -855,10 +871,11 @@ static void arizona_micd_detect(struct work_struct *work)
 	if (info->detecting && (val & MICD_LVL_1_TO_7)) {
 	if (info->detecting && (val & MICD_LVL_1_TO_7)) {
 		if (info->jack_flips >= info->micd_num_modes * 10) {
 		if (info->jack_flips >= info->micd_num_modes * 10) {
 			dev_dbg(arizona->dev, "Detected HP/line\n");
 			dev_dbg(arizona->dev, "Detected HP/line\n");
-			arizona_identify_headphone(info);
 
 
 			info->detecting = false;
 			info->detecting = false;
 
 
+			arizona_identify_headphone(info);
+
 			arizona_stop_mic(info);
 			arizona_stop_mic(info);
 		} else {
 		} else {
 			info->micd_mode++;
 			info->micd_mode++;
@@ -1110,12 +1127,12 @@ static void arizona_micd_set_level(struct arizona *arizona, int index,
 	regmap_update_bits(arizona->regmap, reg, mask, level);
 	regmap_update_bits(arizona->regmap, reg, mask, level);
 }
 }
 
 
-static int arizona_extcon_of_get_pdata(struct arizona *arizona)
+static int arizona_extcon_device_get_pdata(struct arizona *arizona)
 {
 {
 	struct arizona_pdata *pdata = &arizona->pdata;
 	struct arizona_pdata *pdata = &arizona->pdata;
 	unsigned int val = ARIZONA_ACCDET_MODE_HPL;
 	unsigned int val = ARIZONA_ACCDET_MODE_HPL;
 
 
-	of_property_read_u32(arizona->dev->of_node, "wlf,hpdet-channel", &val);
+	device_property_read_u32(arizona->dev, "wlf,hpdet-channel", &val);
 	switch (val) {
 	switch (val) {
 	case ARIZONA_ACCDET_MODE_HPL:
 	case ARIZONA_ACCDET_MODE_HPL:
 	case ARIZONA_ACCDET_MODE_HPR:
 	case ARIZONA_ACCDET_MODE_HPR:
@@ -1127,6 +1144,24 @@ static int arizona_extcon_of_get_pdata(struct arizona *arizona)
 		pdata->hpdet_channel = ARIZONA_ACCDET_MODE_HPL;
 		pdata->hpdet_channel = ARIZONA_ACCDET_MODE_HPL;
 	}
 	}
 
 
+	device_property_read_u32(arizona->dev, "wlf,micd-detect-debounce",
+				 &pdata->micd_detect_debounce);
+
+	device_property_read_u32(arizona->dev, "wlf,micd-bias-start-time",
+				 &pdata->micd_bias_start_time);
+
+	device_property_read_u32(arizona->dev, "wlf,micd-rate",
+				 &pdata->micd_rate);
+
+	device_property_read_u32(arizona->dev, "wlf,micd-dbtime",
+				 &pdata->micd_dbtime);
+
+	device_property_read_u32(arizona->dev, "wlf,micd-timeout",
+				 &pdata->micd_timeout);
+
+	pdata->micd_force_micbias = device_property_read_bool(arizona->dev,
+						"wlf,micd-force-micbias");
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -1147,10 +1182,8 @@ static int arizona_extcon_probe(struct platform_device *pdev)
 	if (!info)
 	if (!info)
 		return -ENOMEM;
 		return -ENOMEM;
 
 
-	if (IS_ENABLED(CONFIG_OF)) {
-		if (!dev_get_platdata(arizona->dev))
-			arizona_extcon_of_get_pdata(arizona);
-	}
+	if (!dev_get_platdata(arizona->dev))
+		arizona_extcon_device_get_pdata(arizona);
 
 
 	info->micvdd = devm_regulator_get(&pdev->dev, "MICVDD");
 	info->micvdd = devm_regulator_get(&pdev->dev, "MICVDD");
 	if (IS_ERR(info->micvdd)) {
 	if (IS_ERR(info->micvdd)) {
@@ -1241,6 +1274,27 @@ static int arizona_extcon_probe(struct platform_device *pdev)
 				arizona->pdata.micd_pol_gpio, ret);
 				arizona->pdata.micd_pol_gpio, ret);
 			goto err_register;
 			goto err_register;
 		}
 		}
+	} else {
+		if (info->micd_modes[0].gpio)
+			mode = GPIOD_OUT_HIGH;
+		else
+			mode = GPIOD_OUT_LOW;
+
+		/* We can't use devm here because we need to do the get
+		 * against the MFD device, as that is where the of_node
+		 * will reside, but if we devm against that the GPIO
+		 * will not be freed if the extcon driver is unloaded.
+		 */
+		info->micd_pol_gpio = gpiod_get_optional(arizona->dev,
+							 "wlf,micd-pol",
+							 GPIOD_OUT_LOW);
+		if (IS_ERR(info->micd_pol_gpio)) {
+			ret = PTR_ERR(info->micd_pol_gpio);
+			dev_err(arizona->dev,
+				"Failed to get microphone polarity GPIO: %d\n",
+				ret);
+			goto err_register;
+		}
 	}
 	}
 
 
 	if (arizona->pdata.hpdet_id_gpio > 0) {
 	if (arizona->pdata.hpdet_id_gpio > 0) {
@@ -1251,7 +1305,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
 		if (ret != 0) {
 		if (ret != 0) {
 			dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
 			dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
 				arizona->pdata.hpdet_id_gpio, ret);
 				arizona->pdata.hpdet_id_gpio, ret);
-			goto err_register;
+			goto err_gpio;
 		}
 		}
 	}
 	}
 
 
@@ -1267,11 +1321,19 @@ static int arizona_extcon_probe(struct platform_device *pdev)
 				   arizona->pdata.micd_rate
 				   arizona->pdata.micd_rate
 				   << ARIZONA_MICD_RATE_SHIFT);
 				   << ARIZONA_MICD_RATE_SHIFT);
 
 
-	if (arizona->pdata.micd_dbtime)
+	switch (arizona->pdata.micd_dbtime) {
+	case MICD_DBTIME_FOUR_READINGS:
 		regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
 		regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
 				   ARIZONA_MICD_DBTIME_MASK,
 				   ARIZONA_MICD_DBTIME_MASK,
-				   arizona->pdata.micd_dbtime
-				   << ARIZONA_MICD_DBTIME_SHIFT);
+				   ARIZONA_MICD_DBTIME);
+		break;
+	case MICD_DBTIME_TWO_READINGS:
+		regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+				   ARIZONA_MICD_DBTIME_MASK, 0);
+		break;
+	default:
+		break;
+	}
 
 
 	BUILD_BUG_ON(ARRAY_SIZE(arizona_micd_levels) != 0x40);
 	BUILD_BUG_ON(ARRAY_SIZE(arizona_micd_levels) != 0x40);
 
 
@@ -1295,7 +1357,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
 				dev_err(arizona->dev,
 				dev_err(arizona->dev,
 					"MICD ranges must be sorted\n");
 					"MICD ranges must be sorted\n");
 				ret = -EINVAL;
 				ret = -EINVAL;
-				goto err_input;
+				goto err_gpio;
 			}
 			}
 		}
 		}
 	}
 	}
@@ -1314,7 +1376,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
 			dev_err(arizona->dev, "Unsupported MICD level %d\n",
 			dev_err(arizona->dev, "Unsupported MICD level %d\n",
 				info->micd_ranges[i].max);
 				info->micd_ranges[i].max);
 			ret = -EINVAL;
 			ret = -EINVAL;
-			goto err_input;
+			goto err_gpio;
 		}
 		}
 
 
 		dev_dbg(arizona->dev, "%d ohms for MICD threshold %d\n",
 		dev_dbg(arizona->dev, "%d ohms for MICD threshold %d\n",
@@ -1387,7 +1449,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
 	if (ret != 0) {
 	if (ret != 0) {
 		dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n",
 		dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n",
 			ret);
 			ret);
-		goto err_input;
+		goto err_gpio;
 	}
 	}
 
 
 	ret = arizona_set_irq_wake(arizona, jack_irq_rise, 1);
 	ret = arizona_set_irq_wake(arizona, jack_irq_rise, 1);
@@ -1458,7 +1520,8 @@ err_rise_wake:
 	arizona_set_irq_wake(arizona, jack_irq_rise, 0);
 	arizona_set_irq_wake(arizona, jack_irq_rise, 0);
 err_rise:
 err_rise:
 	arizona_free_irq(arizona, jack_irq_rise, info);
 	arizona_free_irq(arizona, jack_irq_rise, info);
-err_input:
+err_gpio:
+	gpiod_put(info->micd_pol_gpio);
 err_register:
 err_register:
 	pm_runtime_disable(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	return ret;
 	return ret;
@@ -1470,6 +1533,8 @@ static int arizona_extcon_remove(struct platform_device *pdev)
 	struct arizona *arizona = info->arizona;
 	struct arizona *arizona = info->arizona;
 	int jack_irq_rise, jack_irq_fall;
 	int jack_irq_rise, jack_irq_fall;
 
 
+	gpiod_put(info->micd_pol_gpio);
+
 	pm_runtime_disable(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
 
 	regmap_update_bits(arizona->regmap,
 	regmap_update_bits(arizona->regmap,

+ 0 - 18
drivers/extcon/extcon-gpio.c

@@ -65,22 +65,6 @@ static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 	return IRQ_HANDLED;
 }
 }
 
 
-static ssize_t extcon_gpio_print_state(struct extcon_dev *edev, char *buf)
-{
-	struct device *dev = edev->dev.parent;
-	struct gpio_extcon_data *extcon_data = dev_get_drvdata(dev);
-	const char *state;
-
-	if (extcon_get_state(edev))
-		state = extcon_data->state_on;
-	else
-		state = extcon_data->state_off;
-
-	if (state)
-		return sprintf(buf, "%s\n", state);
-	return -EINVAL;
-}
-
 static int gpio_extcon_probe(struct platform_device *pdev)
 static int gpio_extcon_probe(struct platform_device *pdev)
 {
 {
 	struct gpio_extcon_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct gpio_extcon_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -110,8 +94,6 @@ static int gpio_extcon_probe(struct platform_device *pdev)
 	extcon_data->state_on = pdata->state_on;
 	extcon_data->state_on = pdata->state_on;
 	extcon_data->state_off = pdata->state_off;
 	extcon_data->state_off = pdata->state_off;
 	extcon_data->check_on_resume = pdata->check_on_resume;
 	extcon_data->check_on_resume = pdata->check_on_resume;
-	if (pdata->state_on && pdata->state_off)
-		extcon_data->edev->print_state = extcon_gpio_print_state;
 
 	ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN,
 				    pdev->name);

+ 9 - 0
drivers/extcon/extcon-max77843.c

@@ -781,6 +781,15 @@ static int max77843_muic_probe(struct platform_device *pdev)
 	/* Support virtual irq domain for max77843 MUIC device */
 	INIT_WORK(&info->irq_work, max77843_muic_irq_work);
 
+	/* Clear IRQ bits before request IRQs */
+	ret = regmap_bulk_read(max77843->regmap_muic,
+			MAX77843_MUIC_REG_INT1, info->status,
+			MAX77843_MUIC_IRQ_NUM);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
+		goto err_muic_irq;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(max77843_muic_irqs); i++) {
 		struct max77843_muic_irq *muic_irq = &max77843_muic_irqs[i];
 		unsigned int virq = 0;

+ 116 - 18
drivers/extcon/extcon-palmas.c

@@ -28,6 +28,11 @@
 #include <linux/mfd/palmas.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/workqueue.h>
+
+#define USB_GPIO_DEBOUNCE_MS	20	/* ms */
 
 static const unsigned int palmas_extcon_cable[] = {
 	EXTCON_USB,
@@ -35,8 +40,6 @@ static const unsigned int palmas_extcon_cable[] = {
 	EXTCON_NONE,
 };
 
-static const int mutually_exclusive[] = {0x3, 0x0};
-
 static void palmas_usb_wakeup(struct palmas *palmas, int enable)
 {
 	if (enable)
@@ -120,19 +123,54 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
 	return IRQ_HANDLED;
 }
 
+static void palmas_gpio_id_detect(struct work_struct *work)
+{
+	int id;
+	struct palmas_usb *palmas_usb = container_of(to_delayed_work(work),
+						     struct palmas_usb,
+						     wq_detectid);
+	struct extcon_dev *edev = palmas_usb->edev;
+
+	if (!palmas_usb->id_gpiod)
+		return;
+
+	id = gpiod_get_value_cansleep(palmas_usb->id_gpiod);
+
+	if (id) {
+		extcon_set_cable_state_(edev, EXTCON_USB_HOST, false);
+		dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
+	} else {
+		extcon_set_cable_state_(edev, EXTCON_USB_HOST, true);
+		dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
+	}
+}
+
+static irqreturn_t palmas_gpio_id_irq_handler(int irq, void *_palmas_usb)
+{
+	struct palmas_usb *palmas_usb = _palmas_usb;
+
+	queue_delayed_work(system_power_efficient_wq, &palmas_usb->wq_detectid,
+			   palmas_usb->sw_debounce_jiffies);
+
+	return IRQ_HANDLED;
+}
+
 static void palmas_enable_irq(struct palmas_usb *palmas_usb)
 {
 	palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
 		PALMAS_USB_VBUS_CTRL_SET,
 		PALMAS_USB_VBUS_CTRL_SET_VBUS_ACT_COMP);
 
-	palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
-		PALMAS_USB_ID_CTRL_SET, PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP);
+	if (palmas_usb->enable_id_detection) {
+		palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
+			     PALMAS_USB_ID_CTRL_SET,
+			     PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP);
 
-	palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
-		PALMAS_USB_ID_INT_EN_HI_SET,
-		PALMAS_USB_ID_INT_EN_HI_SET_ID_GND |
-		PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT);
+		palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
+			     PALMAS_USB_ID_INT_EN_HI_SET,
+			     PALMAS_USB_ID_INT_EN_HI_SET_ID_GND |
+			     PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT);
+	}
 
 	if (palmas_usb->enable_vbus_detection)
 		palmas_vbus_irq_handler(palmas_usb->vbus_irq, palmas_usb);
@@ -171,20 +209,37 @@ static int palmas_usb_probe(struct platform_device *pdev)
 			palmas_usb->wakeup = pdata->wakeup;
 	}
 
+	palmas_usb->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id",
+							GPIOD_IN);
+	if (IS_ERR(palmas_usb->id_gpiod)) {
+		dev_err(&pdev->dev, "failed to get id gpio\n");
+		return PTR_ERR(palmas_usb->id_gpiod);
+	}
+
+	if (palmas_usb->enable_id_detection && palmas_usb->id_gpiod) {
+		palmas_usb->enable_id_detection = false;
+		palmas_usb->enable_gpio_id_detection = true;
+	}
+
+	if (palmas_usb->enable_gpio_id_detection) {
+		u32 debounce;
+
+		if (of_property_read_u32(node, "debounce-delay-ms", &debounce))
+			debounce = USB_GPIO_DEBOUNCE_MS;
+
+		status = gpiod_set_debounce(palmas_usb->id_gpiod,
+					    debounce * 1000);
+		if (status < 0)
+			palmas_usb->sw_debounce_jiffies = msecs_to_jiffies(debounce);
+	}
+
+	INIT_DELAYED_WORK(&palmas_usb->wq_detectid, palmas_gpio_id_detect);
+
 	palmas->usb = palmas_usb;
 	palmas_usb->palmas = palmas;
 
 	palmas_usb->dev	 = &pdev->dev;
 
-	palmas_usb->id_otg_irq = regmap_irq_get_virq(palmas->irq_data,
-						PALMAS_ID_OTG_IRQ);
-	palmas_usb->id_irq = regmap_irq_get_virq(palmas->irq_data,
-						PALMAS_ID_IRQ);
-	palmas_usb->vbus_otg_irq = regmap_irq_get_virq(palmas->irq_data,
-						PALMAS_VBUS_OTG_IRQ);
-	palmas_usb->vbus_irq = regmap_irq_get_virq(palmas->irq_data,
-						PALMAS_VBUS_IRQ);
-
 	palmas_usb_wakeup(palmas, palmas_usb->wakeup);
 
 	platform_set_drvdata(pdev, palmas_usb);
@@ -195,7 +250,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "failed to allocate extcon device\n");
 		return -ENOMEM;
 	}
-	palmas_usb->edev->mutually_exclusive = mutually_exclusive;
 
 	status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
 	if (status) {
@@ -204,6 +258,10 @@ static int palmas_usb_probe(struct platform_device *pdev)
 	}
 
 	if (palmas_usb->enable_id_detection) {
+		palmas_usb->id_otg_irq = regmap_irq_get_virq(palmas->irq_data,
+							     PALMAS_ID_OTG_IRQ);
+		palmas_usb->id_irq = regmap_irq_get_virq(palmas->irq_data,
+							 PALMAS_ID_IRQ);
 		status = devm_request_threaded_irq(palmas_usb->dev,
 				palmas_usb->id_irq,
 				NULL, palmas_id_irq_handler,
@@ -215,9 +273,33 @@ static int palmas_usb_probe(struct platform_device *pdev)
 					palmas_usb->id_irq, status);
 			return status;
 		}
+	} else if (palmas_usb->enable_gpio_id_detection) {
+		palmas_usb->gpio_id_irq = gpiod_to_irq(palmas_usb->id_gpiod);
+		if (palmas_usb->gpio_id_irq < 0) {
+			dev_err(&pdev->dev, "failed to get id irq\n");
+			return palmas_usb->gpio_id_irq;
+		}
+		status = devm_request_threaded_irq(&pdev->dev,
+						   palmas_usb->gpio_id_irq,
+						   NULL,
+						   palmas_gpio_id_irq_handler,
+						   IRQF_TRIGGER_RISING |
+						   IRQF_TRIGGER_FALLING |
+						   IRQF_ONESHOT,
+						   "palmas_usb_id",
+						   palmas_usb);
+		if (status < 0) {
+			dev_err(&pdev->dev,
+				"failed to request handler for id irq\n");
+			return status;
+		}
 	}
 	}
 
 	if (palmas_usb->enable_vbus_detection) {
+						       PALMAS_VBUS_OTG_IRQ);
+		palmas_usb->vbus_irq = regmap_irq_get_virq(palmas->irq_data,
+							   PALMAS_VBUS_IRQ);
 		status = devm_request_threaded_irq(palmas_usb->dev,
 				palmas_usb->vbus_irq, NULL,
 				palmas_vbus_irq_handler,
@@ -232,10 +314,21 @@ static int palmas_usb_probe(struct platform_device *pdev)
 	}
 
 	palmas_enable_irq(palmas_usb);
+	/* perform initial detection */
+	palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
 	device_set_wakeup_capable(&pdev->dev, true);
 	return 0;
 }
 
+static int palmas_usb_remove(struct platform_device *pdev)
+{
+	struct palmas_usb *palmas_usb = platform_get_drvdata(pdev);
+
+	cancel_delayed_work_sync(&palmas_usb->wq_detectid);
+
+	return 0;
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int palmas_usb_suspend(struct device *dev)
 {
@@ -246,6 +339,8 @@ static int palmas_usb_suspend(struct device *dev)
 			enable_irq_wake(palmas_usb->vbus_irq);
 		if (palmas_usb->enable_id_detection)
 			enable_irq_wake(palmas_usb->id_irq);
+		if (palmas_usb->enable_gpio_id_detection)
+			enable_irq_wake(palmas_usb->gpio_id_irq);
 	}
 	return 0;
 }
@@ -259,6 +354,8 @@ static int palmas_usb_resume(struct device *dev)
 			disable_irq_wake(palmas_usb->vbus_irq);
 		if (palmas_usb->enable_id_detection)
 			disable_irq_wake(palmas_usb->id_irq);
+		if (palmas_usb->enable_gpio_id_detection)
+			disable_irq_wake(palmas_usb->gpio_id_irq);
 	}
 	return 0;
 };
@@ -276,6 +373,7 @@ static const struct of_device_id of_palmas_match_tbl[] = {
 
 static struct platform_driver palmas_usb_driver = {
 	.probe = palmas_usb_probe,
+	.remove = palmas_usb_remove,
 	.driver = {
 		.name = "palmas-usb",
 		.of_match_table = of_palmas_match_tbl,

+ 0 - 1
drivers/extcon/extcon-rt8973a.c

@@ -693,7 +693,6 @@ MODULE_DEVICE_TABLE(i2c, rt8973a_i2c_id);
 static struct i2c_driver rt8973a_muic_i2c_driver = {
 	.driver		= {
 		.name	= "rt8973a",
-		.owner	= THIS_MODULE,
 		.pm	= &rt8973a_muic_pm_ops,
 		.of_match_table = rt8973a_dt_match,
 	},

+ 0 - 1
drivers/extcon/extcon-sm5502.c

@@ -685,7 +685,6 @@ MODULE_DEVICE_TABLE(i2c, sm5502_i2c_id);
 static struct i2c_driver sm5502_muic_i2c_driver = {
 	.driver		= {
 		.name	= "sm5502",
-		.owner	= THIS_MODULE,
 		.pm	= &sm5502_muic_pm_ops,
 		.of_match_table = sm5502_dt_match,
 	},

+ 1 - 0
drivers/extcon/extcon-usb-gpio.c

@@ -15,6 +15,7 @@
  */
 
 #include <linux/extcon.h>
+#include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>

+ 35 - 13
drivers/extcon/extcon.c

@@ -126,7 +126,7 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
 
 static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
 {
-	unsigned int id = -EINVAL;
+	int id = -EINVAL;
 	int i = 0;
 
 	/* Find the id of extcon cable */
@@ -143,7 +143,7 @@ static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
 
 static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
 {
-	unsigned int id;
+	int id;
 
 	if (edev->max_supported == 0)
 		return -EINVAL;
@@ -172,14 +172,6 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
 	int i, count = 0;
 	struct extcon_dev *edev = dev_get_drvdata(dev);
 
-	if (edev->print_state) {
-		int ret = edev->print_state(edev, buf);
-
-		if (ret >= 0)
-			return ret;
-		/* Use default if failed */
-	}
-
 	if (edev->max_supported == 0)
 		return sprintf(buf, "%u\n", edev->state);
 
@@ -272,6 +264,9 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
 	unsigned long flags;
 	bool attached;
 
+	if (!edev)
+		return -EINVAL;
+
 	spin_lock_irqsave(&edev->lock, flags);
 
 	if (edev->state != ((edev->state & ~mask) | (state & mask))) {
@@ -345,6 +340,9 @@ EXPORT_SYMBOL_GPL(extcon_update_state);
  */
 int extcon_set_state(struct extcon_dev *edev, u32 state)
 {
+	if (!edev)
+		return -EINVAL;
+
 	return extcon_update_state(edev, 0xffffffff, state);
 }
 EXPORT_SYMBOL_GPL(extcon_set_state);
@@ -358,6 +356,9 @@ int extcon_get_cable_state_(struct extcon_dev *edev, const unsigned int id)
 {
 	int index;
 
+	if (!edev)
+		return -EINVAL;
+
 	index = find_cable_index_by_id(edev, id);
 	if (index < 0)
 		return index;
@@ -378,7 +379,7 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
  */
 int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
 {
-	unsigned int id;
+	int id;
 
 	id = find_cable_id_by_name(edev, cable_name);
 	if (id < 0)
@@ -402,6 +403,9 @@ int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
 	u32 state;
 	int index;
 
+	if (!edev)
+		return -EINVAL;
+
 	index = find_cable_index_by_id(edev, id);
 	if (index < 0)
 		return index;
@@ -426,7 +430,7 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
 int extcon_set_cable_state(struct extcon_dev *edev,
 			const char *cable_name, bool cable_state)
 {
-	unsigned int id;
+	int id;
 
 	id = find_cable_id_by_name(edev, cable_name);
 	if (id < 0)
@@ -444,6 +448,9 @@ struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
 {
 	struct extcon_dev *sd;
 
+	if (!extcon_name)
+		return ERR_PTR(-EINVAL);
+
 	mutex_lock(&extcon_dev_list_lock);
 	list_for_each_entry(sd, &extcon_dev_list, entry) {
 		if (!strcmp(sd->name, extcon_name))
@@ -572,6 +579,9 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
 	unsigned long flags;
 	int ret, idx;
 
+	if (!edev || !nb)
+		return -EINVAL;
+
 	idx = find_cable_index_by_id(edev, id);
 
 	spin_lock_irqsave(&edev->lock, flags);
@@ -594,6 +604,9 @@ int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
 	unsigned long flags;
 	int ret, idx;
 
+	if (!edev || !nb)
+		return -EINVAL;
+
 	idx = find_cable_index_by_id(edev, id);
 
 	spin_lock_irqsave(&edev->lock, flags);
@@ -654,6 +667,9 @@ struct extcon_dev *extcon_dev_allocate(const unsigned int *supported_cable)
 {
 	struct extcon_dev *edev;
 
+	if (!supported_cable)
+		return ERR_PTR(-EINVAL);
+
 	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
 	if (!edev)
 		return ERR_PTR(-ENOMEM);
@@ -754,7 +770,7 @@ int extcon_dev_register(struct extcon_dev *edev)
 			return ret;
 	}
 
-	if (!edev->supported_cable)
+	if (!edev || !edev->supported_cable)
 		return -EINVAL;
 
 	for (; edev->supported_cable[index] != EXTCON_NONE; index++);
@@ -960,6 +976,9 @@ void extcon_dev_unregister(struct extcon_dev *edev)
 {
 	int index;
 
+	if (!edev)
+		return;
+
 	mutex_lock(&extcon_dev_list_lock);
 	list_del(&edev->entry);
 	mutex_unlock(&extcon_dev_list_lock);
@@ -1066,6 +1085,9 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
 	struct device_node *node;
 	struct extcon_dev *edev;
 
+	if (!dev)
+		return ERR_PTR(-EINVAL);
+
 	if (!dev->of_node) {
 		dev_err(dev, "device does not have a device node entry\n");
 		return ERR_PTR(-EINVAL);

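The extcon.c hunks above add NULL-pointer guards to the public API. Below is a minimal consumer-side sketch (hypothetical driver code, not part of this series) of how a caller can lean on those checks; the helper name and the warning message are assumptions made only for illustration.

static int example_report_usb(struct extcon_dev *edev, bool attached)
{
	int ret;

	/*
	 * With the guards added above, a NULL edev (or an unsupported
	 * cable id) comes back as -EINVAL instead of an oops.
	 */
	ret = extcon_set_cable_state_(edev, EXTCON_USB, attached);
	if (ret < 0)
		pr_warn("example: extcon update failed: %d\n", ret);

	return ret;
}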
+ 3 - 1
drivers/hv/channel.c

@@ -601,6 +601,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	u64 aligned_data = 0;
 	int ret;
 	bool signal = false;
+	int num_vecs = ((bufferlen != 0) ? 3 : 1);
 
 
 	/* Setup the descriptor */
@@ -618,7 +619,8 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
+				  &signal);
 
 	/*
 	 * Signalling the host is conditional on many factors:

+ 28 - 6
drivers/hv/channel_mgmt.c

@@ -347,6 +347,7 @@ enum {
 	IDE = 0,
 	SCSI,
 	NIC,
+	ND_NIC,
 	MAX_PERF_CHN,
 };
 
@@ -391,6 +392,7 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
 	struct vmbus_channel *primary = channel->primary_channel;
 	int next_node;
 	struct cpumask available_mask;
+	struct cpumask *alloced_mask;
 
 	for (i = IDE; i < MAX_PERF_CHN; i++) {
 		if (!memcmp(type_guid->b, hp_devs[i].guid,
@@ -408,7 +410,6 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
 		 * channel, bind it to cpu 0.
 		 */
 		channel->numa_node = 0;
-		cpumask_set_cpu(0, &channel->alloced_cpus_in_node);
 		channel->target_cpu = 0;
 		channel->target_vp = hv_context.vp_index[0];
 		return;
@@ -433,21 +434,38 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
 		channel->numa_node = next_node;
 		primary = channel;
 	}
+	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];
 
-	if (cpumask_weight(&primary->alloced_cpus_in_node) ==
+	if (cpumask_weight(alloced_mask) ==
 	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
 		/*
 		 * We have cycled through all the CPUs in the node;
 		 * reset the alloced map.
 		 */
-		cpumask_clear(&primary->alloced_cpus_in_node);
+		cpumask_clear(alloced_mask);
 	}
 
-	cpumask_xor(&available_mask, &primary->alloced_cpus_in_node,
+	cpumask_xor(&available_mask, alloced_mask,
 		    cpumask_of_node(primary->numa_node));
 
-	cur_cpu = cpumask_next(-1, &available_mask);
-	cpumask_set_cpu(cur_cpu, &primary->alloced_cpus_in_node);
+	cur_cpu = -1;
+	while (true) {
+		cur_cpu = cpumask_next(cur_cpu, &available_mask);
+		if (cur_cpu >= nr_cpu_ids) {
+			cur_cpu = -1;
+			cpumask_copy(&available_mask,
+				     cpumask_of_node(primary->numa_node));
+			continue;
+		}
+
+		if (!cpumask_test_cpu(cur_cpu,
+				&primary->alloced_cpus_in_node)) {
+			cpumask_set_cpu(cur_cpu,
+					&primary->alloced_cpus_in_node);
+			cpumask_set_cpu(cur_cpu, alloced_mask);
+			break;
+		}
+	}
 
 	channel->target_cpu = cur_cpu;
 	channel->target_vp = hv_context.vp_index[cur_cpu];
@@ -469,6 +487,10 @@ void vmbus_initiate_unload(void)
 {
 	struct vmbus_channel_message_header hdr;
 
+	/* Pre-Win2012R2 hosts don't support reconnect */
+	if (vmbus_proto_version < VERSION_WIN8_1)
+		return;
+
 	init_completion(&vmbus_connection.unload_event);
 	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
 	hdr.msgtype = CHANNELMSG_UNLOAD;

+ 119 - 33
drivers/hv/hv.c

@@ -93,11 +93,14 @@ static int query_hypervisor_info(void)
  */
  */
 static u64 do_hypercall(u64 control, void *input, void *output)
 static u64 do_hypercall(u64 control, void *input, void *output)
 {
 {
-#ifdef CONFIG_X86_64
-	u64 hv_status = 0;
 	u64 input_address = (input) ? virt_to_phys(input) : 0;
 	u64 input_address = (input) ? virt_to_phys(input) : 0;
 	u64 output_address = (output) ? virt_to_phys(output) : 0;
 	u64 output_address = (output) ? virt_to_phys(output) : 0;
 	void *hypercall_page = hv_context.hypercall_page;
 	void *hypercall_page = hv_context.hypercall_page;
+#ifdef CONFIG_X86_64
+	u64 hv_status = 0;
+
+	if (!hypercall_page)
+		return (u64)ULLONG_MAX;
 
 
 	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
 	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
 	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
 	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
@@ -112,13 +115,13 @@ static u64 do_hypercall(u64 control, void *input, void *output)
 	u32 control_lo = control & 0xFFFFFFFF;
 	u32 control_lo = control & 0xFFFFFFFF;
 	u32 hv_status_hi = 1;
 	u32 hv_status_hi = 1;
 	u32 hv_status_lo = 1;
 	u32 hv_status_lo = 1;
-	u64 input_address = (input) ? virt_to_phys(input) : 0;
 	u32 input_address_hi = input_address >> 32;
 	u32 input_address_hi = input_address >> 32;
 	u32 input_address_lo = input_address & 0xFFFFFFFF;
 	u32 input_address_lo = input_address & 0xFFFFFFFF;
-	u64 output_address = (output) ? virt_to_phys(output) : 0;
 	u32 output_address_hi = output_address >> 32;
 	u32 output_address_hi = output_address >> 32;
 	u32 output_address_lo = output_address & 0xFFFFFFFF;
 	u32 output_address_lo = output_address & 0xFFFFFFFF;
-	void *hypercall_page = hv_context.hypercall_page;
+
+	if (!hypercall_page)
+		return (u64)ULLONG_MAX;
 
 
 	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
 	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
 			      "=a"(hv_status_lo) : "d" (control_hi),
 			      "=a"(hv_status_lo) : "d" (control_hi),
@@ -130,6 +133,56 @@ static u64 do_hypercall(u64 control, void *input, void *output)
 #endif /* !x86_64 */
 #endif /* !x86_64 */
 }
 }
 
 
+#ifdef CONFIG_X86_64
+static cycle_t read_hv_clock_tsc(struct clocksource *arg)
+{
+	cycle_t current_tick;
+	struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page;
+
+	if (tsc_pg->tsc_sequence != -1) {
+		/*
+		 * Use the tsc page to compute the value.
+		 */
+
+		while (1) {
+			cycle_t tmp;
+			u32 sequence = tsc_pg->tsc_sequence;
+			u64 cur_tsc;
+			u64 scale = tsc_pg->tsc_scale;
+			s64 offset = tsc_pg->tsc_offset;
+
+			rdtscll(cur_tsc);
+			/* current_tick = ((cur_tsc *scale) >> 64) + offset */
+			asm("mulq %3"
+				: "=d" (current_tick), "=a" (tmp)
+				: "a" (cur_tsc), "r" (scale));
+
+			current_tick += offset;
+			if (tsc_pg->tsc_sequence == sequence)
+				return current_tick;
+
+			if (tsc_pg->tsc_sequence != -1)
+				continue;
+			/*
+			 * Fallback using MSR method.
+			 */
+			break;
+		}
+	}
+	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
+	return current_tick;
+}
+
+static struct clocksource hyperv_cs_tsc = {
+		.name           = "hyperv_clocksource_tsc_page",
+		.rating         = 425,
+		.read           = read_hv_clock_tsc,
+		.mask           = CLOCKSOURCE_MASK(64),
+		.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+#endif
+
+
 /*
 /*
  * hv_init - Main initialization routine.
  * hv_init - Main initialization routine.
  *
  *
@@ -139,7 +192,9 @@ int hv_init(void)
 {
 {
 	int max_leaf;
 	int max_leaf;
 	union hv_x64_msr_hypercall_contents hypercall_msr;
 	union hv_x64_msr_hypercall_contents hypercall_msr;
+	union hv_x64_msr_hypercall_contents tsc_msr;
 	void *virtaddr = NULL;
 	void *virtaddr = NULL;
+	void *va_tsc = NULL;
 
 
 	memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
 	memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
 	memset(hv_context.synic_message_page, 0,
 	memset(hv_context.synic_message_page, 0,
@@ -183,6 +238,22 @@ int hv_init(void)
 
 
 	hv_context.hypercall_page = virtaddr;
 	hv_context.hypercall_page = virtaddr;
 
 
+#ifdef CONFIG_X86_64
+	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
+		va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
+		if (!va_tsc)
+			goto cleanup;
+		hv_context.tsc_page = va_tsc;
+
+		rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
+
+		tsc_msr.enable = 1;
+		tsc_msr.guest_physical_address = vmalloc_to_pfn(va_tsc);
+
+		wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
+		clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
+	}
+#endif
 	return 0;
 	return 0;
 
 
 cleanup:
 cleanup:
@@ -216,6 +287,21 @@ void hv_cleanup(void)
 		vfree(hv_context.hypercall_page);
 		vfree(hv_context.hypercall_page);
 		hv_context.hypercall_page = NULL;
 		hv_context.hypercall_page = NULL;
 	}
 	}
+
+#ifdef CONFIG_X86_64
+	/*
+	 * Cleanup the TSC page based CS.
+	 */
+	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
+		clocksource_change_rating(&hyperv_cs_tsc, 10);
+		clocksource_unregister(&hyperv_cs_tsc);
+
+		hypercall_msr.as_uint64 = 0;
+		wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
+		vfree(hv_context.tsc_page);
+		hv_context.tsc_page = NULL;
+	}
+#endif
 }
 }
 
 
 /*
 /*
@@ -271,7 +357,7 @@ static int hv_ce_set_next_event(unsigned long delta,
 {
 {
 	cycle_t current_tick;
 	cycle_t current_tick;
 
 
-	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
+	WARN_ON(!clockevent_state_oneshot(evt));
 
 
 	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
 	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
 	current_tick += delta;
 	current_tick += delta;
@@ -279,31 +365,24 @@ static int hv_ce_set_next_event(unsigned long delta,
 	return 0;
 	return 0;
 }
 }
 
 
-static void hv_ce_setmode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
+static int hv_ce_shutdown(struct clock_event_device *evt)
+{
+	wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0);
+	wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0);
+
+	return 0;
+}
+
+static int hv_ce_set_oneshot(struct clock_event_device *evt)
 {
 {
 	union hv_timer_config timer_cfg;
 	union hv_timer_config timer_cfg;
 
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		/* unsupported */
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-		timer_cfg.enable = 1;
-		timer_cfg.auto_enable = 1;
-		timer_cfg.sintx = VMBUS_MESSAGE_SINT;
-		wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
-		break;
-
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0);
-		wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	timer_cfg.enable = 1;
+	timer_cfg.auto_enable = 1;
+	timer_cfg.sintx = VMBUS_MESSAGE_SINT;
+	wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
+
+	return 0;
 }
 }
 
 
 static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
 static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
@@ -318,7 +397,8 @@ static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
 	 * references to the hv_vmbus module making it impossible to unload.
 	 * references to the hv_vmbus module making it impossible to unload.
 	 */
 	 */
 
 
-	dev->set_mode = hv_ce_setmode;
+	dev->set_state_shutdown = hv_ce_shutdown;
+	dev->set_state_oneshot = hv_ce_set_oneshot;
 	dev->set_next_event = hv_ce_set_next_event;
 	dev->set_next_event = hv_ce_set_next_event;
 }
 }
 
 
@@ -329,6 +409,13 @@ int hv_synic_alloc(void)
 	size_t ced_size = sizeof(struct clock_event_device);
 	size_t ced_size = sizeof(struct clock_event_device);
 	int cpu;
 	int cpu;
 
 
+	hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
+					 GFP_ATOMIC);
+	if (hv_context.hv_numa_map == NULL) {
+		pr_err("Unable to allocate NUMA map\n");
+		goto err;
+	}
+
 	for_each_online_cpu(cpu) {
 	for_each_online_cpu(cpu) {
 		hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
 		hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
 		if (hv_context.event_dpc[cpu] == NULL) {
 		if (hv_context.event_dpc[cpu] == NULL) {
@@ -342,6 +429,7 @@ int hv_synic_alloc(void)
 			pr_err("Unable to allocate clock event device\n");
 			pr_err("Unable to allocate clock event device\n");
 			goto err;
 			goto err;
 		}
 		}
+
 		hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);
 		hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);
 
 
 		hv_context.synic_message_page[cpu] =
 		hv_context.synic_message_page[cpu] =
@@ -390,6 +478,7 @@ void hv_synic_free(void)
 {
 {
 	int cpu;
 	int cpu;
 
 
+	kfree(hv_context.hv_numa_map);
 	for_each_online_cpu(cpu)
 	for_each_online_cpu(cpu)
 		hv_synic_free_cpu(cpu);
 		hv_synic_free_cpu(cpu);
 }
 }
@@ -503,8 +592,7 @@ void hv_synic_cleanup(void *arg)
 
 
 	/* Turn off clockevent device */
 	/* Turn off clockevent device */
 	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
 	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
-		hv_ce_setmode(CLOCK_EVT_MODE_SHUTDOWN,
-			      hv_context.clk_evt[cpu]);
+		hv_ce_shutdown(hv_context.clk_evt[cpu]);
 
 
 	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
 	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
 
 
@@ -530,6 +618,4 @@ void hv_synic_cleanup(void *arg)
 	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
 	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
 	sctrl.enable = 0;
 	sctrl.enable = 0;
 	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
 	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
-
-	hv_synic_free_cpu(cpu);
 }
 }

+ 20 - 6
drivers/hv/hv_balloon.c

@@ -62,11 +62,13 @@
 enum {
 enum {
 	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
 	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
 	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
 	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
+	DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),
 
 
 	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
 	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
 	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
 	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
+	DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,
 
 
-	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN8
+	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
 };
 };
 
 
 
 
@@ -1296,13 +1298,25 @@ static void version_resp(struct hv_dynmem_device *dm,
 	if (dm->next_version == 0)
 	if (dm->next_version == 0)
 		goto version_error;
 		goto version_error;
 
 
-	dm->next_version = 0;
 	memset(&version_req, 0, sizeof(struct dm_version_request));
 	memset(&version_req, 0, sizeof(struct dm_version_request));
 	version_req.hdr.type = DM_VERSION_REQUEST;
 	version_req.hdr.type = DM_VERSION_REQUEST;
 	version_req.hdr.size = sizeof(struct dm_version_request);
 	version_req.hdr.size = sizeof(struct dm_version_request);
 	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
 	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
-	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
-	version_req.is_last_attempt = 1;
+	version_req.version.version = dm->next_version;
+
+	/*
+	 * Set the next version to try in case current version fails.
+	 * Win7 protocol ought to be the last one to try.
+	 */
+	switch (version_req.version.version) {
+	case DYNMEM_PROTOCOL_VERSION_WIN8:
+		dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
+		version_req.is_last_attempt = 0;
+		break;
+	default:
+		dm->next_version = 0;
+		version_req.is_last_attempt = 1;
+	}
 
 
 	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
 	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
 				sizeof(struct dm_version_request),
 				sizeof(struct dm_version_request),
@@ -1442,7 +1456,7 @@ static int balloon_probe(struct hv_device *dev,
 
 
 	dm_device.dev = dev;
 	dm_device.dev = dev;
 	dm_device.state = DM_INITIALIZING;
 	dm_device.state = DM_INITIALIZING;
-	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
+	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
 	init_completion(&dm_device.host_event);
 	init_completion(&dm_device.host_event);
 	init_completion(&dm_device.config_event);
 	init_completion(&dm_device.config_event);
 	INIT_LIST_HEAD(&dm_device.ha_region_list);
 	INIT_LIST_HEAD(&dm_device.ha_region_list);
@@ -1474,7 +1488,7 @@ static int balloon_probe(struct hv_device *dev,
 	version_req.hdr.type = DM_VERSION_REQUEST;
 	version_req.hdr.type = DM_VERSION_REQUEST;
 	version_req.hdr.size = sizeof(struct dm_version_request);
 	version_req.hdr.size = sizeof(struct dm_version_request);
 	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
 	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
-	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
+	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
 	version_req.is_last_attempt = 0;
 	version_req.is_last_attempt = 0;
 
 
 	ret = vmbus_sendpacket(dev->channel, &version_req,
 	ret = vmbus_sendpacket(dev->channel, &version_req,

+ 13 - 8
drivers/hv/hv_fcopy.c

@@ -116,7 +116,7 @@ static int fcopy_handle_handshake(u32 version)
 
 
 static void fcopy_send_data(struct work_struct *dummy)
 static void fcopy_send_data(struct work_struct *dummy)
 {
 {
-	struct hv_start_fcopy smsg_out;
+	struct hv_start_fcopy *smsg_out = NULL;
 	int operation = fcopy_transaction.fcopy_msg->operation;
 	int operation = fcopy_transaction.fcopy_msg->operation;
 	struct hv_start_fcopy *smsg_in;
 	struct hv_start_fcopy *smsg_in;
 	void *out_src;
 	void *out_src;
@@ -136,21 +136,24 @@ static void fcopy_send_data(struct work_struct *dummy)
 	switch (operation) {
 	switch (operation) {
 	case START_FILE_COPY:
 	case START_FILE_COPY:
 		out_len = sizeof(struct hv_start_fcopy);
 		out_len = sizeof(struct hv_start_fcopy);
-		memset(&smsg_out, 0, out_len);
-		smsg_out.hdr.operation = operation;
+		smsg_out = kzalloc(sizeof(*smsg_out), GFP_KERNEL);
+		if (!smsg_out)
+			return;
+
+		smsg_out->hdr.operation = operation;
 		smsg_in = (struct hv_start_fcopy *)fcopy_transaction.fcopy_msg;
 		smsg_in = (struct hv_start_fcopy *)fcopy_transaction.fcopy_msg;
 
 
 		utf16s_to_utf8s((wchar_t *)smsg_in->file_name, W_MAX_PATH,
 		utf16s_to_utf8s((wchar_t *)smsg_in->file_name, W_MAX_PATH,
 				UTF16_LITTLE_ENDIAN,
 				UTF16_LITTLE_ENDIAN,
-				(__u8 *)&smsg_out.file_name, W_MAX_PATH - 1);
+				(__u8 *)&smsg_out->file_name, W_MAX_PATH - 1);
 
 
 		utf16s_to_utf8s((wchar_t *)smsg_in->path_name, W_MAX_PATH,
 		utf16s_to_utf8s((wchar_t *)smsg_in->path_name, W_MAX_PATH,
 				UTF16_LITTLE_ENDIAN,
 				UTF16_LITTLE_ENDIAN,
-				(__u8 *)&smsg_out.path_name, W_MAX_PATH - 1);
+				(__u8 *)&smsg_out->path_name, W_MAX_PATH - 1);
 
 
-		smsg_out.copy_flags = smsg_in->copy_flags;
-		smsg_out.file_size = smsg_in->file_size;
-		out_src = &smsg_out;
+		smsg_out->copy_flags = smsg_in->copy_flags;
+		smsg_out->file_size = smsg_in->file_size;
+		out_src = smsg_out;
 		break;
 		break;
 
 
 	default:
 	default:
@@ -168,6 +171,8 @@ static void fcopy_send_data(struct work_struct *dummy)
 			fcopy_transaction.state = HVUTIL_READY;
 			fcopy_transaction.state = HVUTIL_READY;
 		}
 		}
 	}
 	}
+	kfree(smsg_out);
+
 	return;
 	return;
 }
 }
 
 

+ 3 - 0
drivers/hv/hv_kvp.c

@@ -353,6 +353,9 @@ kvp_send_key(struct work_struct *dummy)
 		return;
 
 	message = kzalloc(sizeof(*message), GFP_KERNEL);
+	if (!message)
+		return;
+
 	message->kvp_hdr.operation = operation;
 	message->kvp_hdr.pool = pool;
 	in_msg = kvp_transaction.kvp_msg;

+ 1 - 1
drivers/hv/hv_utils_transport.c

@@ -186,7 +186,7 @@ int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len)
 		return -EINVAL;
 	} else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
 		cn_msg = kzalloc(sizeof(*cn_msg) + len, GFP_ATOMIC);
-		if (!msg)
+		if (!cn_msg)
 			return -ENOMEM;
 		cn_msg->id.idx = hvt->cn_id.idx;
 		cn_msg->id.val = hvt->cn_id.val;

+ 15 - 1
drivers/hv/hyperv_vmbus.h

@@ -141,7 +141,7 @@ struct hv_port_info {
 		struct {
 			u32 target_sint;
 			u32 target_vp;
-			u16 base_flag_bumber;
+			u16 base_flag_number;
 			u16 flag_count;
 			u32 rsvdz;
 		} event_port_info;
@@ -517,6 +517,7 @@ struct hv_context {
 	u64 guestid;
 
 	void *hypercall_page;
+	void *tsc_page;
 
 	bool synic_initialized;
 
@@ -551,10 +552,23 @@ struct hv_context {
 	 * Support PV clockevent device.
 	 */
 	struct clock_event_device *clk_evt[NR_CPUS];
+	/*
+	 * To manage allocations in a NUMA node.
+	 * Array indexed by numa node ID.
+	 */
+	struct cpumask *hv_numa_map;
 };
 
 extern struct hv_context hv_context;
 
+struct ms_hyperv_tsc_page {
+	volatile u32 tsc_sequence;
+	u32 reserved1;
+	volatile u64 tsc_scale;
+	volatile s64 tsc_offset;
+	u64 reserved2[509];
+};
+
 struct hv_ring_buffer_debug_info {
 	u32 current_interrupt_mask;
 	u32 current_read_index;

+ 3 - 11
drivers/hv/ring_buffer.c

@@ -103,10 +103,9 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
  *    there is room for the producer to send the pending packet.
  */
 
-static bool hv_need_to_signal_on_read(u32 old_rd,
-					 struct hv_ring_buffer_info *rbi)
+static bool hv_need_to_signal_on_read(u32 prev_write_sz,
+				      struct hv_ring_buffer_info *rbi)
 {
-	u32 prev_write_sz;
 	u32 cur_write_sz;
 	u32 r_size;
 	u32 write_loc = rbi->ring_buffer->write_index;
@@ -123,10 +122,6 @@ static bool hv_need_to_signal_on_read(u32 old_rd,
 	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
 			read_loc - write_loc;
 
-	prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
-			old_rd - write_loc;
-
-
 	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
 		return true;
 
@@ -517,7 +512,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
 	u32 next_read_location = 0;
 	u64 prev_indices = 0;
 	unsigned long flags;
-	u32 old_read;
 
 	if (buflen <= 0)
 		return -EINVAL;
@@ -528,8 +522,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
 				&bytes_avail_toread,
 				&bytes_avail_towrite);
 
-	old_read = bytes_avail_toread;
-
 	/* Make sure there is something to read */
 	if (bytes_avail_toread < buflen) {
 		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
@@ -560,7 +552,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
 
 	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
 
-	*signal = hv_need_to_signal_on_read(old_read, inring_info);
+	*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
 
 	return 0;
 }

+ 283 - 70
drivers/hv/vmbus_drv.c

@@ -39,6 +39,8 @@
 #include <asm/mshyperv.h>
 #include <asm/mshyperv.h>
 #include <linux/notifier.h>
 #include <linux/notifier.h>
 #include <linux/ptrace.h>
 #include <linux/ptrace.h>
+#include <linux/screen_info.h>
+#include <linux/kdebug.h>
 #include "hyperv_vmbus.h"
 #include "hyperv_vmbus.h"
 
 
 static struct acpi_device  *hv_acpi_dev;
 static struct acpi_device  *hv_acpi_dev;
@@ -48,12 +50,18 @@ static struct completion probe_event;
 static int irq;
 static int irq;
 
 
 
 
-static int hyperv_panic_event(struct notifier_block *nb,
-			unsigned long event, void *ptr)
+static void hyperv_report_panic(struct pt_regs *regs)
 {
 {
-	struct pt_regs *regs;
+	static bool panic_reported;
 
 
-	regs = current_pt_regs();
+	/*
+	 * We prefer to report panic on 'die' chain as we have proper
+	 * registers to report, but if we miss it (e.g. on BUG()) we need
+	 * to report it on 'panic'.
+	 */
+	if (panic_reported)
+		return;
+	panic_reported = true;
 
 
 	wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
 	wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
 	wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
 	wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
@@ -65,18 +73,37 @@ static int hyperv_panic_event(struct notifier_block *nb,
 	 * Let Hyper-V know there is crash data available
 	 * Let Hyper-V know there is crash data available
 	 */
 	 */
 	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
 	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
+}
+
+static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
+			      void *args)
+{
+	struct pt_regs *regs;
+
+	regs = current_pt_regs();
+
+	hyperv_report_panic(regs);
 	return NOTIFY_DONE;
 	return NOTIFY_DONE;
 }
 }
 
 
+static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
+			    void *args)
+{
+	struct die_args *die = (struct die_args *)args;
+	struct pt_regs *regs = die->regs;
+
+	hyperv_report_panic(regs);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block hyperv_die_block = {
+	.notifier_call = hyperv_die_event,
+};
 static struct notifier_block hyperv_panic_block = {
 static struct notifier_block hyperv_panic_block = {
 	.notifier_call = hyperv_panic_event,
 	.notifier_call = hyperv_panic_event,
 };
 };
 
 
-struct resource hyperv_mmio = {
-	.name  = "hyperv mmio",
-	.flags = IORESOURCE_MEM,
-};
-EXPORT_SYMBOL_GPL(hyperv_mmio);
+struct resource *hyperv_mmio;
 
 
 static int vmbus_exists(void)
 static int vmbus_exists(void)
 {
 {
@@ -414,6 +441,43 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
 }
 }
 static DEVICE_ATTR_RO(in_write_bytes_avail);
 static DEVICE_ATTR_RO(in_write_bytes_avail);
 
 
+static ssize_t channel_vp_mapping_show(struct device *dev,
+				       struct device_attribute *dev_attr,
+				       char *buf)
+{
+	struct hv_device *hv_dev = device_to_hv_device(dev);
+	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
+	unsigned long flags;
+	int buf_size = PAGE_SIZE, n_written, tot_written;
+	struct list_head *cur;
+
+	if (!channel)
+		return -ENODEV;
+
+	tot_written = snprintf(buf, buf_size, "%u:%u\n",
+		channel->offermsg.child_relid, channel->target_cpu);
+
+	spin_lock_irqsave(&channel->lock, flags);
+
+	list_for_each(cur, &channel->sc_list) {
+		if (tot_written >= buf_size - 1)
+			break;
+
+		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
+		n_written = scnprintf(buf + tot_written,
+				     buf_size - tot_written,
+				     "%u:%u\n",
+				     cur_sc->offermsg.child_relid,
+				     cur_sc->target_cpu);
+		tot_written += n_written;
+	}
+
+	spin_unlock_irqrestore(&channel->lock, flags);
+
+	return tot_written;
+}
+static DEVICE_ATTR_RO(channel_vp_mapping);
+
 /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
 /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
 static struct attribute *vmbus_attrs[] = {
 static struct attribute *vmbus_attrs[] = {
 	&dev_attr_id.attr,
 	&dev_attr_id.attr,
@@ -438,6 +502,7 @@ static struct attribute *vmbus_attrs[] = {
 	&dev_attr_in_write_index.attr,
 	&dev_attr_in_write_index.attr,
 	&dev_attr_in_read_bytes_avail.attr,
 	&dev_attr_in_read_bytes_avail.attr,
 	&dev_attr_in_write_bytes_avail.attr,
 	&dev_attr_in_write_bytes_avail.attr,
+	&dev_attr_channel_vp_mapping.attr,
 	NULL,
 	NULL,
 };
 };
 ATTRIBUTE_GROUPS(vmbus);
 ATTRIBUTE_GROUPS(vmbus);
@@ -763,38 +828,6 @@ static void vmbus_isr(void)
 	}
 	}
 }
 }
 
 
-#ifdef CONFIG_HOTPLUG_CPU
-static int hyperv_cpu_disable(void)
-{
-	return -ENOSYS;
-}
-
-static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
-{
-	static void *previous_cpu_disable;
-
-	/*
-	 * Offlining a CPU when running on newer hypervisors (WS2012R2, Win8,
-	 * ...) is not supported at this moment as channel interrupts are
-	 * distributed across all of them.
-	 */
-
-	if ((vmbus_proto_version == VERSION_WS2008) ||
-	    (vmbus_proto_version == VERSION_WIN7))
-		return;
-
-	if (vmbus_loaded) {
-		previous_cpu_disable = smp_ops.cpu_disable;
-		smp_ops.cpu_disable = hyperv_cpu_disable;
-		pr_notice("CPU offlining is not supported by hypervisor\n");
-	} else if (previous_cpu_disable)
-		smp_ops.cpu_disable = previous_cpu_disable;
-}
-#else
-static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
-{
-}
-#endif
 
 
 /*
 /*
  * vmbus_bus_init -Main vmbus driver initialization routine.
  * vmbus_bus_init -Main vmbus driver initialization routine.
@@ -836,12 +869,14 @@ static int vmbus_bus_init(int irq)
 	if (ret)
 	if (ret)
 		goto err_alloc;
 		goto err_alloc;
 
 
-	hv_cpu_hotplug_quirk(true);
+	if (vmbus_proto_version > VERSION_WIN7)
+		cpu_hotplug_disable();
 
 
 	/*
 	/*
 	 * Only register if the crash MSRs are available
 	 * Only register if the crash MSRs are available
 	 */
 	 */
-	if (ms_hyperv.features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+		register_die_notifier(&hyperv_die_block);
 		atomic_notifier_chain_register(&panic_notifier_list,
 		atomic_notifier_chain_register(&panic_notifier_list,
 					       &hyperv_panic_block);
 					       &hyperv_panic_block);
 	}
 	}
@@ -863,8 +898,8 @@ err_cleanup:
 }
 }
 
 
 /**
 /**
- * __vmbus_child_driver_register - Register a vmbus's driver
- * @drv: Pointer to driver structure you want to register
+ * __vmbus_child_driver_register() - Register a vmbus's driver
+ * @hv_driver: Pointer to driver structure you want to register
  * @owner: owner module of the drv
  * @owner: owner module of the drv
  * @mod_name: module name string
  * @mod_name: module name string
  *
  *
@@ -896,7 +931,8 @@ EXPORT_SYMBOL_GPL(__vmbus_driver_register);
 
 
 /**
 /**
  * vmbus_driver_unregister() - Unregister a vmbus's driver
  * vmbus_driver_unregister() - Unregister a vmbus's driver
- * @drv: Pointer to driver structure you want to un-register
+ * @hv_driver: Pointer to driver structure you want to
+ *             un-register
  *
  *
  * Un-register the given driver that was previous registered with a call to
  * Un-register the given driver that was previous registered with a call to
  * vmbus_driver_register()
  * vmbus_driver_register()
@@ -982,30 +1018,184 @@ void vmbus_device_unregister(struct hv_device *device_obj)
 
 
 
 
 /*
 /*
- * VMBUS is an acpi enumerated device. Get the the information we
+ * VMBUS is an acpi enumerated device. Get the information we
  * need from DSDT.
  * need from DSDT.
  */
  */
-
+#define VTPM_BASE_ADDRESS 0xfed40000
 static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
 static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
 {
 {
+	resource_size_t start = 0;
+	resource_size_t end = 0;
+	struct resource *new_res;
+	struct resource **old_res = &hyperv_mmio;
+	struct resource **prev_res = NULL;
+
 	switch (res->type) {
 	switch (res->type) {
 	case ACPI_RESOURCE_TYPE_IRQ:
 	case ACPI_RESOURCE_TYPE_IRQ:
 		irq = res->data.irq.interrupts[0];
 		irq = res->data.irq.interrupts[0];
+		return AE_OK;
+
+	/*
+	 * "Address" descriptors are for bus windows. Ignore
+	 * "memory" descriptors, which are for registers on
+	 * devices.
+	 */
+	case ACPI_RESOURCE_TYPE_ADDRESS32:
+		start = res->data.address32.address.minimum;
+		end = res->data.address32.address.maximum;
 		break;
 		break;
 
 
 	case ACPI_RESOURCE_TYPE_ADDRESS64:
 	case ACPI_RESOURCE_TYPE_ADDRESS64:
-		hyperv_mmio.start = res->data.address64.address.minimum;
-		hyperv_mmio.end = res->data.address64.address.maximum;
+		start = res->data.address64.address.minimum;
+		end = res->data.address64.address.maximum;
 		break;
 		break;
+
+	default:
+		/* Unused resource type */
+		return AE_OK;
+
 	}
 	}
+	/*
+	 * Ignore ranges that are below 1MB, as they're not
+	 * necessary or useful here.
+	 */
+	if (end < 0x100000)
+		return AE_OK;
+
+	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
+	if (!new_res)
+		return AE_NO_MEMORY;
+
+	/* If this range overlaps the virtual TPM, truncate it. */
+	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
+		end = VTPM_BASE_ADDRESS;
+
+	new_res->name = "hyperv mmio";
+	new_res->flags = IORESOURCE_MEM;
+	new_res->start = start;
+	new_res->end = end;
+
+	do {
+		if (!*old_res) {
+			*old_res = new_res;
+			break;
+		}
+
+		if ((*old_res)->end < new_res->start) {
+			new_res->sibling = *old_res;
+			if (prev_res)
+				(*prev_res)->sibling = new_res;
+			*old_res = new_res;
+			break;
+		}
+
+		prev_res = old_res;
+		old_res = &(*old_res)->sibling;
+
+	} while (1);
 
 
 	return AE_OK;
 	return AE_OK;
 }
 }
 
 
+static int vmbus_acpi_remove(struct acpi_device *device)
+{
+	struct resource *cur_res;
+	struct resource *next_res;
+
+	if (hyperv_mmio) {
+		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
+			next_res = cur_res->sibling;
+			kfree(cur_res);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
+ * @new:		If successful, supplied a pointer to the
+ *			allocated MMIO space.
+ * @device_obj:		Identifies the caller
+ * @min:		Minimum guest physical address of the
+ *			allocation
+ * @max:		Maximum guest physical address
+ * @size:		Size of the range to be allocated
+ * @align:		Alignment of the range to be allocated
+ * @fb_overlap_ok:	Whether this allocation can be allowed
+ *			to overlap the video frame buffer.
+ *
+ * This function walks the resources granted to VMBus by the
+ * _CRS object in the ACPI namespace underneath the parent
+ * "bridge" whether that's a root PCI bus in the Generation 1
+ * case or a Module Device in the Generation 2 case.  It then
+ * attempts to allocate from the global MMIO pool in a way that
+ * matches the constraints supplied in these parameters and by
+ * that _CRS.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
+			resource_size_t min, resource_size_t max,
+			resource_size_t size, resource_size_t align,
+			bool fb_overlap_ok)
+{
+	struct resource *iter;
+	resource_size_t range_min, range_max, start, local_min, local_max;
+	const char *dev_n = dev_name(&device_obj->device);
+	u32 fb_end = screen_info.lfb_base + (screen_info.lfb_size << 1);
+	int i;
+
+	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
+		if ((iter->start >= max) || (iter->end <= min))
+			continue;
+
+		range_min = iter->start;
+		range_max = iter->end;
+
+		/* If this range overlaps the frame buffer, split it into
+		   two tries. */
+		for (i = 0; i < 2; i++) {
+			local_min = range_min;
+			local_max = range_max;
+			if (fb_overlap_ok || (range_min >= fb_end) ||
+			    (range_max <= screen_info.lfb_base)) {
+				i++;
+			} else {
+				if ((range_min <= screen_info.lfb_base) &&
+				    (range_max >= screen_info.lfb_base)) {
+					/*
+					 * The frame buffer is in this window,
+					 * so trim this into the part that
+					 * preceeds the frame buffer.
+					 */
+					local_max = screen_info.lfb_base - 1;
+					range_min = fb_end;
+				} else {
+					range_min = fb_end;
+					continue;
+				}
+			}
+
+			start = (local_min + align - 1) & ~(align - 1);
+			for (; start + size - 1 <= local_max; start += align) {
+				*new = request_mem_region_exclusive(start, size,
+								    dev_n);
+				if (*new)
+					return 0;
+			}
+		}
+	}
+
+	return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
+
 static int vmbus_acpi_add(struct acpi_device *device)
 {
 	acpi_status result;
 	int ret_val = -ENODEV;
+	struct acpi_device *ancestor;
 
 	hv_acpi_dev = device;
 
@@ -1015,35 +1205,27 @@ static int vmbus_acpi_add(struct acpi_device *device)
 	if (ACPI_FAILURE(result))
 		goto acpi_walk_err;
 	/*
-	 * The parent of the vmbus acpi device (Gen2 firmware) is the VMOD that
-	 * has the mmio ranges. Get that.
+	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
+	 * firmware) is the VMOD that has the mmio ranges. Get that.
 	 */
-	if (device->parent) {
-		result = acpi_walk_resources(device->parent->handle,
-					METHOD_NAME__CRS,
-					vmbus_walk_resources, NULL);
+	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
+		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
+					     vmbus_walk_resources, NULL);
 
 		if (ACPI_FAILURE(result))
-			goto acpi_walk_err;
-		if (hyperv_mmio.start && hyperv_mmio.end)
-			request_resource(&iomem_resource, &hyperv_mmio);
+			continue;
+		if (hyperv_mmio)
+			break;
 	}
 	ret_val = 0;
 
 acpi_walk_err:
 	complete(&probe_event);
+	if (ret_val)
+		vmbus_acpi_remove(device);
 	return ret_val;
 }
 
-static int vmbus_acpi_remove(struct acpi_device *device)
-{
-	int ret = 0;
-
-	if (hyperv_mmio.start && hyperv_mmio.end)
-		ret = release_resource(&hyperv_mmio);
-	return ret;
-}
-
 static const struct acpi_device_id vmbus_acpi_device_ids[] = {
 	{"VMBUS", 0},
 	{"VMBus", 0},
@@ -1060,6 +1242,29 @@ static struct acpi_driver vmbus_acpi_driver = {
 	},
 };
 
+static void hv_kexec_handler(void)
+{
+	int cpu;
+
+	hv_synic_clockevents_cleanup();
+	vmbus_initiate_unload();
+	for_each_online_cpu(cpu)
+		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
+	hv_cleanup();
+};
+
+static void hv_crash_handler(struct pt_regs *regs)
+{
+	vmbus_initiate_unload();
+	/*
+	 * In crash handler we can't schedule synic cleanup for all CPUs,
+	 * doing the cleanup for current CPU only. This should be sufficient
+	 * for kdump.
+	 */
+	hv_synic_cleanup(NULL);
+	hv_cleanup();
+};
+
 static int __init hv_acpi_init(void)
 {
 	int ret, t;
@@ -1092,6 +1297,9 @@ static int __init hv_acpi_init(void)
 	if (ret)
 		goto cleanup;
 
+	hv_setup_kexec_handler(hv_kexec_handler);
+	hv_setup_crash_handler(hv_crash_handler);
+
 	return 0;
 
 cleanup:
@@ -1104,13 +1312,16 @@ static void __exit vmbus_exit(void)
 {
 	int cpu;
 
+	hv_remove_kexec_handler();
+	hv_remove_crash_handler();
 	vmbus_connection.conn_state = DISCONNECTED;
 	hv_synic_clockevents_cleanup();
 	vmbus_disconnect();
 	hv_remove_vmbus_irq();
 	tasklet_kill(&msg_dpc);
 	vmbus_free_channels();
-	if (ms_hyperv.features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+		unregister_die_notifier(&hyperv_die_block);
 		atomic_notifier_chain_unregister(&panic_notifier_list,
 						 &hyperv_panic_block);
 	}
@@ -1120,8 +1331,10 @@ static void __exit vmbus_exit(void)
 		tasklet_kill(hv_context.event_dpc[cpu]);
 		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
 	}
+	hv_synic_free();
 	acpi_bus_unregister_driver(&vmbus_acpi_driver);
-	hv_cpu_hotplug_quirk(false);
+	if (vmbus_proto_version > VERSION_WIN7)
+		cpu_hotplug_enable();
 }
 
 
 
 

+ 5 - 2
drivers/hwtracing/coresight/coresight-etm.h

@@ -183,7 +183,9 @@
  * @seq_13_event: event causing the transition from 1 to 3.
  * @seq_curr_state: current value of the sequencer register.
  * @ctxid_idx: index for the context ID registers.
- * @ctxid_val: value for the context ID to trigger on.
+ * @ctxid_pid: value for the context ID to trigger on.
+ * @ctxid_vpid:	Virtual PID seen by users if PID namespace is enabled, otherwise
+ *		the same value of ctxid_pid.
  * @ctxid_mask: mask applicable to all the context IDs.
  * @sync_freq:	Synchronisation frequency.
  * @timestamp_event: Defines an event that requests the insertion
@@ -235,7 +237,8 @@ struct etm_drvdata {
 	u32				seq_13_event;
 	u32				seq_curr_state;
 	u8				ctxid_idx;
-	u32				ctxid_val[ETM_MAX_CTXID_CMP];
+	u32				ctxid_pid[ETM_MAX_CTXID_CMP];
+	u32				ctxid_vpid[ETM_MAX_CTXID_CMP];
 	u32				ctxid_mask;
 	u32				sync_freq;
 	u32				timestamp_event;

+ 22 - 11
drivers/hwtracing/coresight/coresight-etm3x.c

@@ -237,8 +237,11 @@ static void etm_set_default(struct etm_drvdata *drvdata)
 
 
 	drvdata->seq_curr_state = 0x0;
 	drvdata->seq_curr_state = 0x0;
 	drvdata->ctxid_idx = 0x0;
 	drvdata->ctxid_idx = 0x0;
-	for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
-		drvdata->ctxid_val[i] = 0x0;
+	for (i = 0; i < drvdata->nr_ctxid_cmp; i++) {
+		drvdata->ctxid_pid[i] = 0x0;
+		drvdata->ctxid_vpid[i] = 0x0;
+	}
+
 	drvdata->ctxid_mask = 0x0;
 	drvdata->ctxid_mask = 0x0;
 }
 }
 
 
@@ -289,7 +292,7 @@ static void etm_enable_hw(void *info)
 	for (i = 0; i < drvdata->nr_ext_out; i++)
 	for (i = 0; i < drvdata->nr_ext_out; i++)
 		etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
 		etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
 	for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
 	for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
-		etm_writel(drvdata, drvdata->ctxid_val[i], ETMCIDCVRn(i));
+		etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i));
 	etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
 	etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
 	etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
 	etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
 	/* No external input selected */
 	/* No external input selected */
@@ -1386,38 +1389,41 @@ static ssize_t ctxid_idx_store(struct device *dev,
 }
 }
 static DEVICE_ATTR_RW(ctxid_idx);
 static DEVICE_ATTR_RW(ctxid_idx);
 
 
-static ssize_t ctxid_val_show(struct device *dev,
+static ssize_t ctxid_pid_show(struct device *dev,
 			      struct device_attribute *attr, char *buf)
 			      struct device_attribute *attr, char *buf)
 {
 {
 	unsigned long val;
 	unsigned long val;
 	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
 	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
 
 
 	spin_lock(&drvdata->spinlock);
 	spin_lock(&drvdata->spinlock);
-	val = drvdata->ctxid_val[drvdata->ctxid_idx];
+	val = drvdata->ctxid_vpid[drvdata->ctxid_idx];
 	spin_unlock(&drvdata->spinlock);
 	spin_unlock(&drvdata->spinlock);
 
 
 	return sprintf(buf, "%#lx\n", val);
 	return sprintf(buf, "%#lx\n", val);
 }
 }
 
 
-static ssize_t ctxid_val_store(struct device *dev,
+static ssize_t ctxid_pid_store(struct device *dev,
 			       struct device_attribute *attr,
 			       struct device_attribute *attr,
 			       const char *buf, size_t size)
 			       const char *buf, size_t size)
 {
 {
 	int ret;
 	int ret;
-	unsigned long val;
+	unsigned long vpid, pid;
 	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
 	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
 
 
-	ret = kstrtoul(buf, 16, &val);
+	ret = kstrtoul(buf, 16, &vpid);
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
 
 
+	pid = coresight_vpid_to_pid(vpid);
+
 	spin_lock(&drvdata->spinlock);
 	spin_lock(&drvdata->spinlock);
-	drvdata->ctxid_val[drvdata->ctxid_idx] = val;
+	drvdata->ctxid_pid[drvdata->ctxid_idx] = pid;
+	drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid;
 	spin_unlock(&drvdata->spinlock);
 	spin_unlock(&drvdata->spinlock);
 
 
 	return size;
 	return size;
 }
 }
-static DEVICE_ATTR_RW(ctxid_val);
+static DEVICE_ATTR_RW(ctxid_pid);
 
 
 static ssize_t ctxid_mask_show(struct device *dev,
 static ssize_t ctxid_mask_show(struct device *dev,
 			       struct device_attribute *attr, char *buf)
 			       struct device_attribute *attr, char *buf)
@@ -1609,7 +1615,7 @@ static struct attribute *coresight_etm_attrs[] = {
 	&dev_attr_seq_13_event.attr,
 	&dev_attr_seq_curr_state.attr,
 	&dev_attr_ctxid_idx.attr,
-	&dev_attr_ctxid_val.attr,
+	&dev_attr_ctxid_pid.attr,
 	&dev_attr_ctxid_mask.attr,
 	&dev_attr_sync_freq.attr,
 	&dev_attr_timestamp_event.attr,
@@ -1912,6 +1918,11 @@ static struct amba_id etm_ids[] = {
 		.mask	= 0x0003ffff,
 		.data	= "PTM 1.1",
 	},
+	{	/* PTM 1.1 Qualcomm */
+		.id	= 0x0003006f,
+		.mask	= 0x0003ffff,
+		.data	= "PTM 1.1",
+	},
 	{ 0, 0},
 };
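The ctxid_pid stores above translate the user-supplied value with coresight_vpid_to_pid() before it is programmed into the comparator. As an illustration of the idea only (this is not claimed to be that helper's actual implementation), a vPID can be mapped to the global PID with the standard pid helpers:

static unsigned long example_vpid_to_pid(unsigned long vpid)
{
	unsigned long pid = 0;
	struct task_struct *task;

	/* resolve the PID in the caller's namespace ... */
	rcu_read_lock();
	task = find_task_by_vpid(vpid);
	if (task)
		pid = task_pid_nr(task);	/* ... and return the init_pid_ns view */
	rcu_read_unlock();

	return pid;
}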
 
 

+ 23 - 14
drivers/hwtracing/coresight/coresight-etm4x.c

@@ -155,7 +155,7 @@ static void etm4_enable_hw(void *info)
 			       drvdata->base + TRCACATRn(i));
 			       drvdata->base + TRCACATRn(i));
 	}
 	}
 	for (i = 0; i < drvdata->numcidc; i++)
 	for (i = 0; i < drvdata->numcidc; i++)
-		writeq_relaxed(drvdata->ctxid_val[i],
+		writeq_relaxed(drvdata->ctxid_pid[i],
 			       drvdata->base + TRCCIDCVRn(i));
 			       drvdata->base + TRCCIDCVRn(i));
 	writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
 	writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
 	writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
 	writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
@@ -506,8 +506,11 @@ static ssize_t reset_store(struct device *dev,
 	}
 	}
 
 
 	drvdata->ctxid_idx = 0x0;
 	drvdata->ctxid_idx = 0x0;
-	for (i = 0; i < drvdata->numcidc; i++)
-		drvdata->ctxid_val[i] = 0x0;
+	for (i = 0; i < drvdata->numcidc; i++) {
+		drvdata->ctxid_pid[i] = 0x0;
+		drvdata->ctxid_vpid[i] = 0x0;
+	}
+
 	drvdata->ctxid_mask0 = 0x0;
 	drvdata->ctxid_mask0 = 0x0;
 	drvdata->ctxid_mask1 = 0x0;
 	drvdata->ctxid_mask1 = 0x0;
 
 
@@ -1815,7 +1818,7 @@ static ssize_t ctxid_idx_store(struct device *dev,
 }
 }
 static DEVICE_ATTR_RW(ctxid_idx);
 static DEVICE_ATTR_RW(ctxid_idx);
 
 
-static ssize_t ctxid_val_show(struct device *dev,
+static ssize_t ctxid_pid_show(struct device *dev,
 			      struct device_attribute *attr,
 			      struct device_attribute *attr,
 			      char *buf)
 			      char *buf)
 {
 {
@@ -1825,17 +1828,17 @@ static ssize_t ctxid_val_show(struct device *dev,
 
 
 	spin_lock(&drvdata->spinlock);
 	spin_lock(&drvdata->spinlock);
 	idx = drvdata->ctxid_idx;
 	idx = drvdata->ctxid_idx;
-	val = (unsigned long)drvdata->ctxid_val[idx];
+	val = (unsigned long)drvdata->ctxid_vpid[idx];
 	spin_unlock(&drvdata->spinlock);
 	spin_unlock(&drvdata->spinlock);
 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 }
 }
 
 
-static ssize_t ctxid_val_store(struct device *dev,
+static ssize_t ctxid_pid_store(struct device *dev,
 			       struct device_attribute *attr,
 			       struct device_attribute *attr,
 			       const char *buf, size_t size)
 			       const char *buf, size_t size)
 {
 {
 	u8 idx;
 	u8 idx;
-	unsigned long val;
+	unsigned long vpid, pid;
 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 
 
 	/*
 	/*
@@ -1845,16 +1848,19 @@ static ssize_t ctxid_val_store(struct device *dev,
 	 */
 	 */
 	if (!drvdata->ctxid_size || !drvdata->numcidc)
 	if (!drvdata->ctxid_size || !drvdata->numcidc)
 		return -EINVAL;
 		return -EINVAL;
-	if (kstrtoul(buf, 16, &val))
+	if (kstrtoul(buf, 16, &vpid))
 		return -EINVAL;
 		return -EINVAL;
 
 
+	pid = coresight_vpid_to_pid(vpid);
+
 	spin_lock(&drvdata->spinlock);
 	spin_lock(&drvdata->spinlock);
 	idx = drvdata->ctxid_idx;
 	idx = drvdata->ctxid_idx;
-	drvdata->ctxid_val[idx] = (u64)val;
+	drvdata->ctxid_pid[idx] = (u64)pid;
+	drvdata->ctxid_vpid[idx] = (u64)vpid;
 	spin_unlock(&drvdata->spinlock);
 	spin_unlock(&drvdata->spinlock);
 	return size;
 	return size;
 }
 }
-static DEVICE_ATTR_RW(ctxid_val);
+static DEVICE_ATTR_RW(ctxid_pid);
 
 
 static ssize_t ctxid_masks_show(struct device *dev,
 static ssize_t ctxid_masks_show(struct device *dev,
 				struct device_attribute *attr,
 				struct device_attribute *attr,
@@ -1949,7 +1955,7 @@ static ssize_t ctxid_masks_store(struct device *dev,
 		 */
 		 */
 		for (j = 0; j < 8; j++) {
 		for (j = 0; j < 8; j++) {
 			if (maskbyte & 1)
 			if (maskbyte & 1)
-				drvdata->ctxid_val[i] &= ~(0xFF << (j * 8));
+				drvdata->ctxid_pid[i] &= ~(0xFF << (j * 8));
 			maskbyte >>= 1;
 			maskbyte >>= 1;
 		}
 		}
 		/* Select the next ctxid comparator mask value */
 		/* Select the next ctxid comparator mask value */
@@ -2193,7 +2199,7 @@ static struct attribute *coresight_etmv4_attrs[] = {
 	&dev_attr_res_idx.attr,
 	&dev_attr_res_idx.attr,
 	&dev_attr_res_ctrl.attr,
 	&dev_attr_res_ctrl.attr,
 	&dev_attr_ctxid_idx.attr,
 	&dev_attr_ctxid_idx.attr,
-	&dev_attr_ctxid_val.attr,
+	&dev_attr_ctxid_pid.attr,
 	&dev_attr_ctxid_masks.attr,
 	&dev_attr_ctxid_masks.attr,
 	&dev_attr_vmid_idx.attr,
 	&dev_attr_vmid_idx.attr,
 	&dev_attr_vmid_val.attr,
 	&dev_attr_vmid_val.attr,
@@ -2513,8 +2519,11 @@ static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
 		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
 		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
 	}
 	}
 
 
-	for (i = 0; i < drvdata->numcidc; i++)
-		drvdata->ctxid_val[i] = 0x0;
+	for (i = 0; i < drvdata->numcidc; i++) {
+		drvdata->ctxid_pid[i] = 0x0;
+		drvdata->ctxid_vpid[i] = 0x0;
+	}
+
 	drvdata->ctxid_mask0 = 0x0;
 	drvdata->ctxid_mask0 = 0x0;
 	drvdata->ctxid_mask1 = 0x0;
 	drvdata->ctxid_mask1 = 0x0;
 
 

+ 5 - 2
drivers/hwtracing/coresight/coresight-etm4x.h

@@ -265,7 +265,9 @@
  * @addr_type:	Current status of the comparator register.
  * @ctxid_idx:	Context ID index selector.
  * @ctxid_size:	Size of the context ID field to consider.
- * @ctxid_val:	Value of the context ID comparator.
+ * @ctxid_pid:	Value of the context ID comparator.
+ * @ctxid_vpid:	Virtual PID seen by users if PID namespace is enabled, otherwise
+ *		the same value of ctxid_pid.
  * @ctxid_mask0:Context ID comparator mask for comparator 0-3.
  * @ctxid_mask1:Context ID comparator mask for comparator 4-7.
  * @vmid_idx:	VM ID index selector.
@@ -352,7 +354,8 @@ struct etmv4_drvdata {
 	u8				addr_type[ETM_MAX_SINGLE_ADDR_CMP];
 	u8				ctxid_idx;
 	u8				ctxid_size;
-	u64				ctxid_val[ETMv4_MAX_CTXID_CMP];
+	u64				ctxid_pid[ETMv4_MAX_CTXID_CMP];
+	u64				ctxid_vpid[ETMv4_MAX_CTXID_CMP];
 	u32				ctxid_mask0;
 	u32				ctxid_mask1;
 	u8				vmid_idx;

+ 1 - 12
drivers/hwtracing/coresight/coresight-replicator.c

@@ -12,7 +12,6 @@
 
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
@@ -184,17 +183,7 @@ static struct platform_driver replicator_driver = {
 	},
 };
 
-static int __init replicator_init(void)
-{
-	return platform_driver_register(&replicator_driver);
-}
-module_init(replicator_init);
-
-static void __exit replicator_exit(void)
-{
-	platform_driver_unregister(&replicator_driver);
-}
-module_exit(replicator_exit);
+builtin_platform_driver(replicator_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("CoreSight Replicator driver");

+ 1 - 3
drivers/md/dm-ioctl.c

@@ -1919,9 +1919,7 @@ int __init dm_interface_init(void)
 
 
 void dm_interface_exit(void)
 {
-	if (misc_deregister(&_dm_misc) < 0)
-		DMERR("misc_deregister failed for control device");
-
+	misc_deregister(&_dm_misc);
 	dm_hash_exit();
 }
 

+ 10 - 0
drivers/misc/Kconfig

@@ -271,6 +271,16 @@ config HP_ILO
 	  To compile this driver as a module, choose M here: the
 	  module will be called hpilo.
 
+config QCOM_COINCELL
+	tristate "Qualcomm coincell charger support"
+	depends on MFD_SPMI_PMIC || COMPILE_TEST
+	help
+	  This driver supports the coincell block found inside of
+	  Qualcomm PMICs.  The coincell charger provides a means to
+	  charge a coincell battery or backup capacitor which is used
+	  to maintain PMIC register and RTC state in the absence of
+	  external power.
+
 config SGI_GRU
 	tristate "SGI GRU driver"
 	depends on X86_UV && SMP

+ 1 - 0
drivers/misc/Makefile

@@ -18,6 +18,7 @@ obj-$(CONFIG_LKDTM)		+= lkdtm.o
 obj-$(CONFIG_TIFM_CORE)       	+= tifm_core.o
 obj-$(CONFIG_TIFM_7XX1)       	+= tifm_7xx1.o
 obj-$(CONFIG_PHANTOM)		+= phantom.o
+obj-$(CONFIG_QCOM_COINCELL)	+= qcom-coincell.o
 obj-$(CONFIG_SENSORS_BH1780)	+= bh1780gli.o
 obj-$(CONFIG_SENSORS_BH1770)	+= bh1770glc.o
 obj-$(CONFIG_SENSORS_APDS990X)	+= apds990x.o

+ 0 - 1
drivers/misc/ad525x_dpot-i2c.c

@@ -106,7 +106,6 @@ MODULE_DEVICE_TABLE(i2c, ad_dpot_id);
 static struct i2c_driver ad_dpot_i2c_driver = {
 	.driver = {
 		.name	= "ad_dpot",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= ad_dpot_i2c_probe,
 	.remove		= ad_dpot_i2c_remove,

+ 0 - 1
drivers/misc/apds990x.c

@@ -1275,7 +1275,6 @@ static const struct dev_pm_ops apds990x_pm_ops = {
 static struct i2c_driver apds990x_driver = {
 	.driver	 = {
 		.name	= "apds990x",
-		.owner	= THIS_MODULE,
 		.pm	= &apds990x_pm_ops,
 	},
 	.probe	  = apds990x_probe,

+ 0 - 1
drivers/misc/bh1770glc.c

@@ -1396,7 +1396,6 @@ static const struct dev_pm_ops bh1770_pm_ops = {
 static struct i2c_driver bh1770_driver = {
 	.driver	 = {
 		.name	= "bh1770glc",
-		.owner	= THIS_MODULE,
 		.pm	= &bh1770_pm_ops,
 	},
 	.probe	  = bh1770_probe,

+ 0 - 1
drivers/misc/bmp085-i2c.c

@@ -66,7 +66,6 @@ MODULE_DEVICE_TABLE(i2c, bmp085_id);
 
 
 static struct i2c_driver bmp085_i2c_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= BMP085_NAME,
 	},
 	.id_table	= bmp085_id,

+ 1 - 6
drivers/misc/cxl/sysfs.c

@@ -443,12 +443,7 @@ static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
 	struct afu_config_record *cr = to_cr(kobj);
 	struct cxl_afu *afu = to_cxl_afu(container_of(kobj->parent, struct device, kobj));
 
-	u64 i, j, val, size = afu->crs_len;
-
-	if (off > size)
-		return 0;
-	if (off + count > size)
-		count = size - off;
+	u64 i, j, val;
 
 	for (i = 0; i < count;) {
 		val = cxl_afu_cr_read64(afu, cr->cr, off & ~0x7);

+ 0 - 12
drivers/misc/ds1682.c

@@ -148,12 +148,6 @@ static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj,
 	dev_dbg(&client->dev, "ds1682_eeprom_read(p=%p, off=%lli, c=%zi)\n",
 		buf, off, count);
 
-	if (off >= DS1682_EEPROM_SIZE)
-		return 0;
-
-	if (off + count > DS1682_EEPROM_SIZE)
-		count = DS1682_EEPROM_SIZE - off;
-
 	rc = i2c_smbus_read_i2c_block_data(client, DS1682_REG_EEPROM + off,
 					   count, buf);
 	if (rc < 0)
@@ -171,12 +165,6 @@ static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj,
 	dev_dbg(&client->dev, "ds1682_eeprom_write(p=%p, off=%lli, c=%zi)\n",
 		buf, off, count);
 
-	if (off >= DS1682_EEPROM_SIZE)
-		return -ENOSPC;
-
-	if (off + count > DS1682_EEPROM_SIZE)
-		count = DS1682_EEPROM_SIZE - off;
-
 	/* Write out to the device */
 	if (i2c_smbus_write_i2c_block_data(client, DS1682_REG_EEPROM + off,
 					   count, buf) < 0)

+ 0 - 13
drivers/misc/eeprom/Kconfig

@@ -96,17 +96,4 @@ config EEPROM_DIGSY_MTC_CFG
 
 
 	  If unsure, say N.
 
-config EEPROM_SUNXI_SID
-	tristate "Allwinner sunxi security ID support"
-	depends on ARCH_SUNXI && SYSFS
-	help
-	  This is a driver for the 'security ID' available on various Allwinner
-	  devices.
-
-	  Due to the potential risks involved with changing e-fuses,
-	  this driver is read-only.
-
-	  This driver can also be built as a module. If so, the module
-	  will be called sunxi_sid.
-
 endmenu

+ 0 - 1
drivers/misc/eeprom/Makefile

@@ -4,5 +4,4 @@ obj-$(CONFIG_EEPROM_LEGACY)	+= eeprom.o
 obj-$(CONFIG_EEPROM_MAX6875)	+= max6875.o
 obj-$(CONFIG_EEPROM_93CX6)	+= eeprom_93cx6.o
 obj-$(CONFIG_EEPROM_93XX46)	+= eeprom_93xx46.o
-obj-$(CONFIG_EEPROM_SUNXI_SID)	+= sunxi_sid.o
 obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o

+ 0 - 1
drivers/misc/eeprom/at24.c

@@ -686,7 +686,6 @@ static int at24_remove(struct i2c_client *client)
 static struct i2c_driver at24_driver = {
 	.driver = {
 		.name = "at24",
-		.owner = THIS_MODULE,
 	},
 	.probe = at24_probe,
 	.remove = at24_remove,

+ 0 - 5
drivers/misc/eeprom/eeprom.c

@@ -88,11 +88,6 @@ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
 	struct eeprom_data *data = i2c_get_clientdata(client);
 	u8 slice;
 
-	if (off > EEPROM_SIZE)
-		return 0;
-	if (off + count > EEPROM_SIZE)
-		count = EEPROM_SIZE - off;
-
 	/* Only refresh slices which contain requested bytes */
 	for (slice = off >> 5; slice <= (off + count - 1) >> 5; slice++)
 		eeprom_update_client(client, slice);

+ 0 - 14
drivers/misc/eeprom/eeprom_93xx46.c

@@ -48,13 +48,6 @@ eeprom_93xx46_bin_read(struct file *filp, struct kobject *kobj,
 	dev = container_of(kobj, struct device, kobj);
 	edev = dev_get_drvdata(dev);
 
-	if (unlikely(off >= edev->bin.size))
-		return 0;
-	if ((off + count) > edev->bin.size)
-		count = edev->bin.size - off;
-	if (unlikely(!count))
-		return count;
-
 	cmd_addr = OP_READ << edev->addrlen;
 
 	if (edev->addrlen == 7) {
@@ -200,13 +193,6 @@ eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj,
 	dev = container_of(kobj, struct device, kobj);
 	edev = dev_get_drvdata(dev);
 
-	if (unlikely(off >= edev->bin.size))
-		return -EFBIG;
-	if ((off + count) > edev->bin.size)
-		count = edev->bin.size - off;
-	if (unlikely(!count))
-		return count;
-
 	/* only write even number of bytes on 16-bit devices */
 	if (edev->addrlen == 6) {
 		step = 2;

+ 0 - 6
drivers/misc/eeprom/max6875.c

@@ -114,12 +114,6 @@ static ssize_t max6875_read(struct file *filp, struct kobject *kobj,
 	struct max6875_data *data = i2c_get_clientdata(client);
 	int slice, max_slice;
 
-	if (off > USER_EEPROM_SIZE)
-		return 0;
-
-	if (off + count > USER_EEPROM_SIZE)
-		count = USER_EEPROM_SIZE - off;
-
 	/* refresh slices which contain requested bytes */
 	max_slice = (off + count - 1) >> SLICE_BITS;
 	for (slice = (off >> SLICE_BITS); slice <= max_slice; slice++)
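The checks removed across these eeprom read/write handlers are redundant because sysfs already clamps off and count against bin_attribute.size before calling into the driver, so a handler only has to copy what it is given. A minimal sketch of such a callback, with an invented example_chip structure standing in for the per-driver data:

static ssize_t example_bin_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct example_chip *chip = dev_get_drvdata(dev);	/* hypothetical */

	/* off/count are already bounded by attr->size at the sysfs layer */
	memcpy(buf, chip->shadow + off, count);
	return count;
}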

+ 0 - 156
drivers/misc/eeprom/sunxi_sid.c

@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl>
- * http://www.linux-sunxi.org
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This driver exposes the Allwinner security ID, efuses exported in byte-
- * sized chunks.
- */
-
-#include <linux/compiler.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/kobject.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/sysfs.h>
-#include <linux/types.h>
-
-#define DRV_NAME "sunxi-sid"
-
-struct sunxi_sid_data {
-	void __iomem *reg_base;
-	unsigned int keysize;
-};
-
-/* We read the entire key, due to a 32 bit read alignment requirement. Since we
- * want to return the requested byte, this results in somewhat slower code and
- * uses 4 times more reads as needed but keeps code simpler. Since the SID is
- * only very rarely probed, this is not really an issue.
- */
-static u8 sunxi_sid_read_byte(const struct sunxi_sid_data *sid_data,
-			      const unsigned int offset)
-{
-	u32 sid_key;
-
-	if (offset >= sid_data->keysize)
-		return 0;
-
-	sid_key = ioread32be(sid_data->reg_base + round_down(offset, 4));
-	sid_key >>= (offset % 4) * 8;
-
-	return sid_key; /* Only return the last byte */
-}
-
-static ssize_t sid_read(struct file *fd, struct kobject *kobj,
-			struct bin_attribute *attr, char *buf,
-			loff_t pos, size_t size)
-{
-	struct platform_device *pdev;
-	struct sunxi_sid_data *sid_data;
-	int i;
-
-	pdev = to_platform_device(kobj_to_dev(kobj));
-	sid_data = platform_get_drvdata(pdev);
-
-	if (pos < 0 || pos >= sid_data->keysize)
-		return 0;
-	if (size > sid_data->keysize - pos)
-		size = sid_data->keysize - pos;
-
-	for (i = 0; i < size; i++)
-		buf[i] = sunxi_sid_read_byte(sid_data, pos + i);
-
-	return i;
-}
-
-static struct bin_attribute sid_bin_attr = {
-	.attr = { .name = "eeprom", .mode = S_IRUGO, },
-	.read = sid_read,
-};
-
-static int sunxi_sid_remove(struct platform_device *pdev)
-{
-	device_remove_bin_file(&pdev->dev, &sid_bin_attr);
-	dev_dbg(&pdev->dev, "driver unloaded\n");
-
-	return 0;
-}
-
-static const struct of_device_id sunxi_sid_of_match[] = {
-	{ .compatible = "allwinner,sun4i-a10-sid", .data = (void *)16},
-	{ .compatible = "allwinner,sun7i-a20-sid", .data = (void *)512},
-	{/* sentinel */},
-};
-MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
-
-static int sunxi_sid_probe(struct platform_device *pdev)
-{
-	struct sunxi_sid_data *sid_data;
-	struct resource *res;
-	const struct of_device_id *of_dev_id;
-	u8 *entropy;
-	unsigned int i;
-
-	sid_data = devm_kzalloc(&pdev->dev, sizeof(struct sunxi_sid_data),
-				GFP_KERNEL);
-	if (!sid_data)
-		return -ENOMEM;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	sid_data->reg_base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(sid_data->reg_base))
-		return PTR_ERR(sid_data->reg_base);
-
-	of_dev_id = of_match_device(sunxi_sid_of_match, &pdev->dev);
-	if (!of_dev_id)
-		return -ENODEV;
-	sid_data->keysize = (int)of_dev_id->data;
-
-	platform_set_drvdata(pdev, sid_data);
-
-	sid_bin_attr.size = sid_data->keysize;
-	if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
-		return -ENODEV;
-
-	entropy = kzalloc(sizeof(u8) * sid_data->keysize, GFP_KERNEL);
-	for (i = 0; i < sid_data->keysize; i++)
-		entropy[i] = sunxi_sid_read_byte(sid_data, i);
-	add_device_randomness(entropy, sid_data->keysize);
-	kfree(entropy);
-
-	dev_dbg(&pdev->dev, "loaded\n");
-
-	return 0;
-}
-
-static struct platform_driver sunxi_sid_driver = {
-	.probe = sunxi_sid_probe,
-	.remove = sunxi_sid_remove,
-	.driver = {
-		.name = DRV_NAME,
-		.of_match_table = sunxi_sid_of_match,
-	},
-};
-module_platform_driver(sunxi_sid_driver);
-
-MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>");
-MODULE_DESCRIPTION("Allwinner sunxi security id driver");
-MODULE_LICENSE("GPL");

+ 0 - 1
drivers/misc/isl29003.c

@@ -465,7 +465,6 @@ MODULE_DEVICE_TABLE(i2c, isl29003_id);
 static struct i2c_driver isl29003_driver = {
 	.driver = {
 		.name	= ISL29003_DRV_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= ISL29003_PM_OPS,
 	},
 	.probe	= isl29003_probe,

+ 0 - 1
drivers/misc/lis3lv02d/lis3lv02d_i2c.c

@@ -274,7 +274,6 @@ static const struct dev_pm_ops lis3_pm_ops = {
 static struct i2c_driver lis3lv02d_i2c_driver = {
 	.driver	 = {
 		.name   = DRV_NAME,
-		.owner  = THIS_MODULE,
 		.pm     = &lis3_pm_ops,
 		.of_match_table = of_match_ptr(lis3lv02d_i2c_dt_ids),
 	},

+ 1 - 1
drivers/misc/mei/Makefile

@@ -11,7 +11,7 @@ mei-objs += main.o
 mei-objs += amthif.o
 mei-objs += wd.o
 mei-objs += bus.o
-mei-objs += nfc.o
+mei-objs += bus-fixup.o
 mei-$(CONFIG_DEBUG_FS) += debugfs.o
 
 obj-$(CONFIG_INTEL_MEI_ME) += mei-me.o

+ 306 - 0
drivers/misc/mei/bus-fixup.c

@@ -0,0 +1,306 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+
+#include <linux/mei_cl_bus.h>
+
+#include "mei_dev.h"
+#include "client.h"
+
+#define MEI_UUID_NFC_INFO UUID_LE(0xd2de1625, 0x382d, 0x417d, \
+			0x48, 0xa4, 0xef, 0xab, 0xba, 0x8a, 0x12, 0x06)
+
+static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
+
+#define MEI_UUID_NFC_HCI UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \
+			0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
+
+#define MEI_UUID_ANY NULL_UUID_LE
+
+/**
+ * number_of_connections - determine whether a client can be on the bus
+ *    according to its number of connections
+ *    We support only clients:
+ *       1. with a single connection
+ *       2. and fixed clients (max_number_of_connections == 0)
+ *
+ * @cldev: me client device
+ */
+static void number_of_connections(struct mei_cl_device *cldev)
+{
+	dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
+			__func__, mei_me_cl_uuid(cldev->me_cl));
+
+	if (cldev->me_cl->props.max_number_of_connections > 1)
+		cldev->do_match = 0;
+}
+
+/**
+ * blacklist - blacklist a client from the bus
+ *
+ * @cldev: me clients device
+ */
+static void blacklist(struct mei_cl_device *cldev)
+{
+	dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
+			__func__, mei_me_cl_uuid(cldev->me_cl));
+	cldev->do_match = 0;
+}
+
+struct mei_nfc_cmd {
+	u8 command;
+	u8 status;
+	u16 req_id;
+	u32 reserved;
+	u16 data_size;
+	u8 sub_command;
+	u8 data[];
+} __packed;
+
+struct mei_nfc_reply {
+	u8 command;
+	u8 status;
+	u16 req_id;
+	u32 reserved;
+	u16 data_size;
+	u8 sub_command;
+	u8 reply_status;
+	u8 data[];
+} __packed;
+
+struct mei_nfc_if_version {
+	u8 radio_version_sw[3];
+	u8 reserved[3];
+	u8 radio_version_hw[3];
+	u8 i2c_addr;
+	u8 fw_ivn;
+	u8 vendor_id;
+	u8 radio_type;
+} __packed;
+
+
+#define MEI_NFC_CMD_MAINTENANCE 0x00
+#define MEI_NFC_SUBCMD_IF_VERSION 0x01
+
+/* Vendors */
+#define MEI_NFC_VENDOR_INSIDE 0x00
+#define MEI_NFC_VENDOR_NXP    0x01
+
+/* Radio types */
+#define MEI_NFC_VENDOR_INSIDE_UREAD 0x00
+#define MEI_NFC_VENDOR_NXP_PN544    0x01
+
+/**
+ * mei_nfc_if_version - get NFC interface version
+ *
+ * @cl: host client (nfc info)
+ * @ver: NFC interface version to be filled in
+ *
+ * Return: 0 on success; < 0 otherwise
+ */
+static int mei_nfc_if_version(struct mei_cl *cl,
+			      struct mei_nfc_if_version *ver)
+{
+	struct mei_device *bus;
+	struct mei_nfc_cmd cmd = {
+		.command = MEI_NFC_CMD_MAINTENANCE,
+		.data_size = 1,
+		.sub_command = MEI_NFC_SUBCMD_IF_VERSION,
+	};
+	struct mei_nfc_reply *reply = NULL;
+	size_t if_version_length;
+	int bytes_recv, ret;
+
+	bus = cl->dev;
+
+	WARN_ON(mutex_is_locked(&bus->device_lock));
+
+	ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 1);
+	if (ret < 0) {
+		dev_err(bus->dev, "Could not send IF version cmd\n");
+		return ret;
+	}
+
+	/* allocate the reply on the heap rather than on the stack */
+	if_version_length = sizeof(struct mei_nfc_reply) +
+		sizeof(struct mei_nfc_if_version);
+
+	reply = kzalloc(if_version_length, GFP_KERNEL);
+	if (!reply)
+		return -ENOMEM;
+
+	ret = 0;
+	bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
+	if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
+		dev_err(bus->dev, "Could not read IF version\n");
+		ret = -EIO;
+		goto err;
+	}
+
+	memcpy(ver, reply->data, sizeof(struct mei_nfc_if_version));
+
+	dev_info(bus->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
+		ver->fw_ivn, ver->vendor_id, ver->radio_type);
+
+err:
+	kfree(reply);
+	return ret;
+}
+
+/**
+ * mei_nfc_radio_name - derive nfc radio name from the interface version
+ *
+ * @ver: NFC radio version
+ *
+ * Return: radio name string
+ */
+static const char *mei_nfc_radio_name(struct mei_nfc_if_version *ver)
+{
+
+	if (ver->vendor_id == MEI_NFC_VENDOR_INSIDE) {
+		if (ver->radio_type == MEI_NFC_VENDOR_INSIDE_UREAD)
+			return "microread";
+	}
+
+	if (ver->vendor_id == MEI_NFC_VENDOR_NXP) {
+		if (ver->radio_type == MEI_NFC_VENDOR_NXP_PN544)
+			return "pn544";
+	}
+
+	return NULL;
+}
+
+/**
+ * mei_nfc - The nfc fixup function. The function retrieves the nfc radio
+ *    name and sets it as the device name so that the proper device
+ *    driver can be loaded for it
+ *
+ * @cldev: me client device (nfc)
+ */
+static void mei_nfc(struct mei_cl_device *cldev)
+{
+	struct mei_device *bus;
+	struct mei_cl *cl;
+	struct mei_me_client *me_cl = NULL;
+	struct mei_nfc_if_version ver;
+	const char *radio_name = NULL;
+	int ret;
+
+	bus = cldev->bus;
+
+	dev_dbg(bus->dev, "running hook %s: %pUl match=%d\n",
+		__func__, mei_me_cl_uuid(cldev->me_cl), cldev->do_match);
+
+	mutex_lock(&bus->device_lock);
+	/* we need to connect to INFO GUID */
+	cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY);
+	if (IS_ERR(cl)) {
+		ret = PTR_ERR(cl);
+		cl = NULL;
+		dev_err(bus->dev, "nfc hook alloc failed %d\n", ret);
+		goto out;
+	}
+
+	me_cl = mei_me_cl_by_uuid(bus, &mei_nfc_info_guid);
+	if (!me_cl) {
+		ret = -ENOTTY;
+		dev_err(bus->dev, "Cannot find nfc info %d\n", ret);
+		goto out;
+	}
+
+	ret = mei_cl_connect(cl, me_cl, NULL);
+	if (ret < 0) {
+		dev_err(&cldev->dev, "Can't connect to the NFC INFO ME ret = %d\n",
+			ret);
+		goto out;
+	}
+
+	mutex_unlock(&bus->device_lock);
+
+	ret = mei_nfc_if_version(cl, &ver);
+	if (ret)
+		goto disconnect;
+
+	radio_name = mei_nfc_radio_name(&ver);
+
+	if (!radio_name) {
+		ret = -ENOENT;
+		dev_err(&cldev->dev, "Can't get the NFC interface version ret = %d\n",
+			ret);
+		goto disconnect;
+	}
+
+	dev_dbg(bus->dev, "nfc radio %s\n", radio_name);
+	strlcpy(cldev->name, radio_name, sizeof(cldev->name));
+
+disconnect:
+	mutex_lock(&bus->device_lock);
+	if (mei_cl_disconnect(cl) < 0)
+		dev_err(bus->dev, "Can't disconnect the NFC INFO ME\n");
+
+	mei_cl_flush_queues(cl, NULL);
+
+out:
+	mei_cl_unlink(cl);
+	mutex_unlock(&bus->device_lock);
+	mei_me_cl_put(me_cl);
+	kfree(cl);
+
+	if (ret)
+		cldev->do_match = 0;
+
+	dev_dbg(bus->dev, "end of fixup match = %d\n", cldev->do_match);
+}
+
+#define MEI_FIXUP(_uuid, _hook) { _uuid, _hook }
+
+static struct mei_fixup {
+
+	const uuid_le uuid;
+	void (*hook)(struct mei_cl_device *cldev);
+} mei_fixups[] = {
+	MEI_FIXUP(MEI_UUID_ANY, number_of_connections),
+	MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist),
+	MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
+};
+
+/**
+ * mei_cl_dev_fixup - run fixup handlers
+ *
+ * @cldev: me client device
+ */
+void mei_cl_dev_fixup(struct mei_cl_device *cldev)
+{
+	struct mei_fixup *f;
+	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mei_fixups); i++) {
+
+		f = &mei_fixups[i];
+		if (uuid_le_cmp(f->uuid, MEI_UUID_ANY) == 0 ||
+		    uuid_le_cmp(f->uuid, *uuid) == 0)
+			f->hook(cldev);
+	}
+}
+
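Adding another fixup is a matter of one hook plus one table entry. The UUID and hook below are invented purely to illustrate the shape of such an addition; they are not part of the driver:

#define MEI_UUID_EXAMPLE UUID_LE(0x00000000, 0x0000, 0x0000, \
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01)

/* hypothetical hook: keep this client on the bus unconditionally */
static void example_allow(struct mei_cl_device *cldev)
{
	cldev->do_match = 1;
}

/* ... and the matching entry in mei_fixups[]: */
/*	MEI_FIXUP(MEI_UUID_EXAMPLE, example_allow), */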

+ 676 - 332
drivers/misc/mei/bus.c

@@ -30,276 +30,29 @@
 #define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
 #define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
 #define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev)
 #define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev)
 
 
-static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
-{
-	struct mei_cl_device *device = to_mei_cl_device(dev);
-	struct mei_cl_driver *driver = to_mei_cl_driver(drv);
-	const struct mei_cl_device_id *id;
-	const uuid_le *uuid;
-	const char *name;
-
-	if (!device)
-		return 0;
-
-	uuid = mei_me_cl_uuid(device->me_cl);
-	name = device->name;
-
-	if (!driver || !driver->id_table)
-		return 0;
-
-	id = driver->id_table;
-
-	while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
-
-		if (!uuid_le_cmp(*uuid, id->uuid)) {
-			if (id->name[0]) {
-				if (!strncmp(name, id->name, sizeof(id->name)))
-					return 1;
-			} else {
-				return 1;
-			}
-		}
-
-		id++;
-	}
-
-	return 0;
-}
-
-static int mei_cl_device_probe(struct device *dev)
-{
-	struct mei_cl_device *device = to_mei_cl_device(dev);
-	struct mei_cl_driver *driver;
-	struct mei_cl_device_id id;
-
-	if (!device)
-		return 0;
-
-	driver = to_mei_cl_driver(dev->driver);
-	if (!driver || !driver->probe)
-		return -ENODEV;
-
-	dev_dbg(dev, "Device probe\n");
-
-	strlcpy(id.name, device->name, sizeof(id.name));
-
-	return driver->probe(device, &id);
-}
-
-static int mei_cl_device_remove(struct device *dev)
-{
-	struct mei_cl_device *device = to_mei_cl_device(dev);
-	struct mei_cl_driver *driver;
-
-	if (!device || !dev->driver)
-		return 0;
-
-	if (device->event_cb) {
-		device->event_cb = NULL;
-		cancel_work_sync(&device->event_work);
-	}
-
-	driver = to_mei_cl_driver(dev->driver);
-	if (!driver->remove) {
-		dev->driver = NULL;
-
-		return 0;
-	}
-
-	return driver->remove(device);
-}
-
-static ssize_t name_show(struct device *dev, struct device_attribute *a,
-			     char *buf)
-{
-	struct mei_cl_device *device = to_mei_cl_device(dev);
-	size_t len;
-
-	len = snprintf(buf, PAGE_SIZE, "%s", device->name);
-
-	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
-}
-static DEVICE_ATTR_RO(name);
-
-static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
-			     char *buf)
-{
-	struct mei_cl_device *device = to_mei_cl_device(dev);
-	const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
-	size_t len;
-
-	len = snprintf(buf, PAGE_SIZE, "%pUl", uuid);
-
-	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
-}
-static DEVICE_ATTR_RO(uuid);
-
-static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
-			     char *buf)
-{
-	struct mei_cl_device *device = to_mei_cl_device(dev);
-	const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
-	size_t len;
-
-	len = snprintf(buf, PAGE_SIZE, "mei:%s:" MEI_CL_UUID_FMT ":",
-		device->name, MEI_CL_UUID_ARGS(uuid->b));
-
-	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
-}
-static DEVICE_ATTR_RO(modalias);
-
-static struct attribute *mei_cl_dev_attrs[] = {
-	&dev_attr_name.attr,
-	&dev_attr_uuid.attr,
-	&dev_attr_modalias.attr,
-	NULL,
-};
-ATTRIBUTE_GROUPS(mei_cl_dev);
-
-static int mei_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
-	struct mei_cl_device *device = to_mei_cl_device(dev);
-	const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
-
-	if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
-		return -ENOMEM;
-
-	if (add_uevent_var(env, "MEI_CL_NAME=%s", device->name))
-		return -ENOMEM;
-
-	if (add_uevent_var(env, "MODALIAS=mei:%s:" MEI_CL_UUID_FMT ":",
-		device->name, MEI_CL_UUID_ARGS(uuid->b)))
-		return -ENOMEM;
-
-	return 0;
-}
-
-static struct bus_type mei_cl_bus_type = {
-	.name		= "mei",
-	.dev_groups	= mei_cl_dev_groups,
-	.match		= mei_cl_device_match,
-	.probe		= mei_cl_device_probe,
-	.remove		= mei_cl_device_remove,
-	.uevent		= mei_cl_uevent,
-};
-
-static void mei_cl_dev_release(struct device *dev)
-{
-	struct mei_cl_device *device = to_mei_cl_device(dev);
-
-	if (!device)
-		return;
-
-	mei_me_cl_put(device->me_cl);
-	kfree(device);
-}
-
-static struct device_type mei_cl_device_type = {
-	.release	= mei_cl_dev_release,
-};
-
-struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev,
-					 uuid_le uuid)
-{
-	struct mei_cl *cl;
-
-	list_for_each_entry(cl, &dev->device_list, device_link) {
-		if (cl->device && cl->device->me_cl &&
-		    !uuid_le_cmp(uuid, *mei_me_cl_uuid(cl->device->me_cl)))
-			return cl;
-	}
-
-	return NULL;
-}
-
-struct mei_cl_device *mei_cl_add_device(struct mei_device *dev,
-					struct mei_me_client *me_cl,
-					struct mei_cl *cl,
-					char *name)
-{
-	struct mei_cl_device *device;
-	int status;
-
-	device = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL);
-	if (!device)
-		return NULL;
-
-	device->me_cl = mei_me_cl_get(me_cl);
-	if (!device->me_cl) {
-		kfree(device);
-		return NULL;
-	}
-
-	device->cl = cl;
-	device->dev.parent = dev->dev;
-	device->dev.bus = &mei_cl_bus_type;
-	device->dev.type = &mei_cl_device_type;
-
-	strlcpy(device->name, name, sizeof(device->name));
-
-	dev_set_name(&device->dev, "mei:%s:%pUl", name, mei_me_cl_uuid(me_cl));
-
-	status = device_register(&device->dev);
-	if (status) {
-		dev_err(dev->dev, "Failed to register MEI device\n");
-		mei_me_cl_put(device->me_cl);
-		kfree(device);
-		return NULL;
-	}
-
-	cl->device = device;
-
-	dev_dbg(&device->dev, "client %s registered\n", name);
-
-	return device;
-}
-EXPORT_SYMBOL_GPL(mei_cl_add_device);
-
-void mei_cl_remove_device(struct mei_cl_device *device)
-{
-	device_unregister(&device->dev);
-}
-EXPORT_SYMBOL_GPL(mei_cl_remove_device);
-
-int __mei_cl_driver_register(struct mei_cl_driver *driver, struct module *owner)
-{
-	int err;
-
-	driver->driver.name = driver->name;
-	driver->driver.owner = owner;
-	driver->driver.bus = &mei_cl_bus_type;
-
-	err = driver_register(&driver->driver);
-	if (err)
-		return err;
-
-	pr_debug("mei: driver [%s] registered\n", driver->driver.name);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(__mei_cl_driver_register);
-
-void mei_cl_driver_unregister(struct mei_cl_driver *driver)
-{
-	driver_unregister(&driver->driver);
-
-	pr_debug("mei: driver [%s] unregistered\n", driver->driver.name);
-}
-EXPORT_SYMBOL_GPL(mei_cl_driver_unregister);
-
+/**
+ * __mei_cl_send - internal client send (write)
+ *
+ * @cl: host client
+ * @buf: buffer to send
+ * @length: buffer length
+ * @blocking: wait for write completion
+ *
+ * Return: written size in bytes or < 0 on error
+ */
 ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
 ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
 			bool blocking)
 			bool blocking)
 {
 {
-	struct mei_device *dev;
+	struct mei_device *bus;
 	struct mei_cl_cb *cb = NULL;
 	struct mei_cl_cb *cb = NULL;
 	ssize_t rets;
 	ssize_t rets;
 
 
 	if (WARN_ON(!cl || !cl->dev))
 	if (WARN_ON(!cl || !cl->dev))
 		return -ENODEV;
 		return -ENODEV;
 
 
-	dev = cl->dev;
+	bus = cl->dev;
 
 
-	mutex_lock(&dev->device_lock);
+	mutex_lock(&bus->device_lock);
 	if (!mei_cl_is_connected(cl)) {
 	if (!mei_cl_is_connected(cl)) {
 		rets = -ENODEV;
 		rets = -ENODEV;
 		goto out;
 		goto out;
@@ -327,16 +80,25 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
 	rets = mei_cl_write(cl, cb, blocking);
 	rets = mei_cl_write(cl, cb, blocking);
 
 
 out:
 out:
-	mutex_unlock(&dev->device_lock);
+	mutex_unlock(&bus->device_lock);
 	if (rets < 0)
 	if (rets < 0)
 		mei_io_cb_free(cb);
 		mei_io_cb_free(cb);
 
 
 	return rets;
 	return rets;
 }
 }
 
 
+/**
+ * __mei_cl_recv - internal client receive (read)
+ *
+ * @cl: host client
+ * @buf: buffer to send
+ * @length: buffer length
+ *
+ * Return: read size in bytes or < 0 on error
+ */
 ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
 ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
 {
 {
-	struct mei_device *dev;
+	struct mei_device *bus;
 	struct mei_cl_cb *cb;
 	struct mei_cl_cb *cb;
 	size_t r_length;
 	size_t r_length;
 	ssize_t rets;
 	ssize_t rets;
@@ -344,9 +106,9 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
 	if (WARN_ON(!cl || !cl->dev))
 	if (WARN_ON(!cl || !cl->dev))
 		return -ENODEV;
 		return -ENODEV;
 
 
-	dev = cl->dev;
+	bus = cl->dev;
 
 
-	mutex_lock(&dev->device_lock);
+	mutex_lock(&bus->device_lock);
 
 
 	cb = mei_cl_read_cb(cl, NULL);
 	cb = mei_cl_read_cb(cl, NULL);
 	if (cb)
 	if (cb)
@@ -356,9 +118,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
 	if (rets && rets != -EBUSY)
 	if (rets && rets != -EBUSY)
 		goto out;
 		goto out;
 
 
+	/* wait on event only if there is no other waiter */
 	if (list_empty(&cl->rd_completed) && !waitqueue_active(&cl->rx_wait)) {
 	if (list_empty(&cl->rd_completed) && !waitqueue_active(&cl->rx_wait)) {
 
 
-		mutex_unlock(&dev->device_lock);
+		mutex_unlock(&bus->device_lock);
 
 
 		if (wait_event_interruptible(cl->rx_wait,
 		if (wait_event_interruptible(cl->rx_wait,
 				(!list_empty(&cl->rd_completed)) ||
 				(!list_empty(&cl->rd_completed)) ||
@@ -369,7 +132,7 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
 			return -ERESTARTSYS;
 			return -ERESTARTSYS;
 		}
 		}
 
 
-		mutex_lock(&dev->device_lock);
+		mutex_lock(&bus->device_lock);
 
 
 		if (!mei_cl_is_connected(cl)) {
 		if (!mei_cl_is_connected(cl)) {
 			rets = -EBUSY;
 			rets = -EBUSY;
@@ -396,14 +159,23 @@ copy:
 free:
 free:
 	mei_io_cb_free(cb);
 	mei_io_cb_free(cb);
 out:
 out:
-	mutex_unlock(&dev->device_lock);
+	mutex_unlock(&bus->device_lock);
 
 
 	return rets;
 	return rets;
 }
 }
 
 
-ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length)
+/**
+ * mei_cl_send - me device send  (write)
+ *
+ * @cldev: me client device
+ * @buf: buffer to send
+ * @length: buffer length
+ *
+ * Return: written size in bytes or < 0 on error
+ */
+ssize_t mei_cl_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
 {
 {
-	struct mei_cl *cl = device->cl;
+	struct mei_cl *cl = cldev->cl;
 
 
 	if (cl == NULL)
 	if (cl == NULL)
 		return -ENODEV;
 		return -ENODEV;
@@ -412,9 +184,18 @@ ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length)
 }
 }
 EXPORT_SYMBOL_GPL(mei_cl_send);
 EXPORT_SYMBOL_GPL(mei_cl_send);
 
 
-ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length)
+/**
+ * mei_cl_recv - client receive (read)
+ *
+ * @cldev: me client device
+ * @buf: buffer to send
+ * @length: buffer length
+ *
+ * Return: read size in bytes or < 0 on error
+ */
+ssize_t mei_cl_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
 {
 {
-	struct mei_cl *cl = device->cl;
+	struct mei_cl *cl = cldev->cl;
 
 
 	if (cl == NULL)
 	if (cl == NULL)
 		return -ENODEV;
 		return -ENODEV;
@@ -423,134 +204,697 @@ ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length)
 }
 }
 EXPORT_SYMBOL_GPL(mei_cl_recv);
 EXPORT_SYMBOL_GPL(mei_cl_recv);
 
 
+/**
+ * mei_bus_event_work  - dispatch rx event for a bus device
+ *    and schedule new work
+ *
+ * @work: work
+ */
 static void mei_bus_event_work(struct work_struct *work)
 static void mei_bus_event_work(struct work_struct *work)
 {
 {
-	struct mei_cl_device *device;
+	struct mei_cl_device *cldev;
 
 
-	device = container_of(work, struct mei_cl_device, event_work);
+	cldev = container_of(work, struct mei_cl_device, event_work);
 
 
-	if (device->event_cb)
-		device->event_cb(device, device->events, device->event_context);
+	if (cldev->event_cb)
+		cldev->event_cb(cldev, cldev->events, cldev->event_context);
 
 
-	device->events = 0;
+	cldev->events = 0;
 
 
 	/* Prepare for the next read */
 	/* Prepare for the next read */
-	mei_cl_read_start(device->cl, 0, NULL);
+	if (cldev->events_mask & BIT(MEI_CL_EVENT_RX))
+		mei_cl_read_start(cldev->cl, 0, NULL);
 }
 }
 
 
-int mei_cl_register_event_cb(struct mei_cl_device *device,
+/**
+ * mei_cl_bus_notify_event - schedule notify cb on bus client
+ *
+ * @cl: host client
+ */
+void mei_cl_bus_notify_event(struct mei_cl *cl)
+{
+	struct mei_cl_device *cldev = cl->cldev;
+
+	if (!cldev || !cldev->event_cb)
+		return;
+
+	if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)))
+		return;
+
+	if (!cl->notify_ev)
+		return;
+
+	set_bit(MEI_CL_EVENT_NOTIF, &cldev->events);
+
+	schedule_work(&cldev->event_work);
+
+	cl->notify_ev = false;
+}
+
+/**
+ * mei_cl_bus_rx_event - schedule rx event
+ *
+ * @cl: host client
+ */
+void mei_cl_bus_rx_event(struct mei_cl *cl)
+{
+	struct mei_cl_device *cldev = cl->cldev;
+
+	if (!cldev || !cldev->event_cb)
+		return;
+
+	if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX)))
+		return;
+
+	set_bit(MEI_CL_EVENT_RX, &cldev->events);
+
+	schedule_work(&cldev->event_work);
+}
+
+/**
+ * mei_cl_register_event_cb - register event callback
+ *
+ * @cldev: me client devices
+ * @event_cb: callback function
+ * @events_mask: requested events bitmask
+ * @context: driver context data
+ *
+ * Return: 0 on success
+ *         -EALREADY if a callback is already registered
+ *         <0 on other errors
+ */
+int mei_cl_register_event_cb(struct mei_cl_device *cldev,
+			  unsigned long events_mask,
 			  mei_cl_event_cb_t event_cb, void *context)
 			  mei_cl_event_cb_t event_cb, void *context)
 {
 {
-	if (device->event_cb)
+	int ret;
+
+	if (cldev->event_cb)
 		return -EALREADY;
 		return -EALREADY;
 
 
-	device->events = 0;
-	device->event_cb = event_cb;
-	device->event_context = context;
-	INIT_WORK(&device->event_work, mei_bus_event_work);
+	cldev->events = 0;
+	cldev->events_mask = events_mask;
+	cldev->event_cb = event_cb;
+	cldev->event_context = context;
+	INIT_WORK(&cldev->event_work, mei_bus_event_work);
 
 
-	mei_cl_read_start(device->cl, 0, NULL);
+	if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
+		ret = mei_cl_read_start(cldev->cl, 0, NULL);
+		if (ret && ret != -EBUSY)
+			return ret;
+	}
+
+	if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) {
+		mutex_lock(&cldev->cl->dev->device_lock);
+		ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 1 : 0);
+		mutex_unlock(&cldev->cl->dev->device_lock);
+		if (ret)
+			return ret;
+	}
 
 
 	return 0;
 	return 0;
 }
 }
 EXPORT_SYMBOL_GPL(mei_cl_register_event_cb);
 EXPORT_SYMBOL_GPL(mei_cl_register_event_cb);
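A bus client driver now passes the events it cares about when it registers its callback. A hedged sketch of the driver side (the callback name and body are assumptions; the signature follows the event_cb invocation in mei_bus_event_work() above):

static void example_event_cb(struct mei_cl_device *cldev, u32 events,
			     void *context)
{
	if (events & BIT(MEI_CL_EVENT_RX)) {
		/* data has arrived; fetch it with mei_cl_recv() */
	}
}

/* in probe(): request RX events only */
/* ret = mei_cl_register_event_cb(cldev, BIT(MEI_CL_EVENT_RX),
 *				   example_event_cb, NULL); */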
 
 
-void *mei_cl_get_drvdata(const struct mei_cl_device *device)
+/**
+ * mei_cl_get_drvdata - driver data getter
+ *
+ * @cldev: mei client device
+ *
+ * Return: driver private data
+ */
+void *mei_cl_get_drvdata(const struct mei_cl_device *cldev)
 {
 {
-	return dev_get_drvdata(&device->dev);
+	return dev_get_drvdata(&cldev->dev);
 }
 }
 EXPORT_SYMBOL_GPL(mei_cl_get_drvdata);
 EXPORT_SYMBOL_GPL(mei_cl_get_drvdata);
 
 
-void mei_cl_set_drvdata(struct mei_cl_device *device, void *data)
+/**
+ * mei_cl_set_drvdata - driver data setter
+ *
+ * @cldev: mei client device
+ * @data: data to store
+ */
+void mei_cl_set_drvdata(struct mei_cl_device *cldev, void *data)
 {
 {
-	dev_set_drvdata(&device->dev, data);
+	dev_set_drvdata(&cldev->dev, data);
 }
 }
 EXPORT_SYMBOL_GPL(mei_cl_set_drvdata);
 EXPORT_SYMBOL_GPL(mei_cl_set_drvdata);
 
 
-int mei_cl_enable_device(struct mei_cl_device *device)
+/**
+ * mei_cl_enable_device - enable me client device
+ *     create connection with me client
+ *
+ * @cldev: me client device
+ *
+ * Return: 0 on success and < 0 on error
+ */
+int mei_cl_enable_device(struct mei_cl_device *cldev)
 {
 {
-	int err;
-	struct mei_device *dev;
-	struct mei_cl *cl = device->cl;
-
-	if (cl == NULL)
-		return -ENODEV;
-
-	dev = cl->dev;
-
-	mutex_lock(&dev->device_lock);
+	struct mei_device *bus = cldev->bus;
+	struct mei_cl *cl;
+	int ret;
+
+	cl = cldev->cl;
+
+	if (!cl) {
+		mutex_lock(&bus->device_lock);
+		cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY);
+		mutex_unlock(&bus->device_lock);
+		if (IS_ERR(cl))
+			return PTR_ERR(cl);
+		/* update pointers */
+		cldev->cl = cl;
+		cl->cldev = cldev;
+	}
 
 
+	mutex_lock(&bus->device_lock);
 	if (mei_cl_is_connected(cl)) {
 	if (mei_cl_is_connected(cl)) {
-		mutex_unlock(&dev->device_lock);
-		dev_warn(dev->dev, "Already connected");
-		return -EBUSY;
+		ret = 0;
+		goto out;
 	}
 	}
 
 
-	err = mei_cl_connect(cl, device->me_cl, NULL);
-	if (err < 0) {
-		mutex_unlock(&dev->device_lock);
-		dev_err(dev->dev, "Could not connect to the ME client");
-
-		return err;
+	if (!mei_me_cl_is_active(cldev->me_cl)) {
+		dev_err(&cldev->dev, "me client is not active\n");
+		ret = -ENOTTY;
+		goto out;
 	}
 	}
 
 
-	mutex_unlock(&dev->device_lock);
+	ret = mei_cl_connect(cl, cldev->me_cl, NULL);
+	if (ret < 0)
+		dev_err(&cldev->dev, "cannot connect\n");
 
 
-	if (device->event_cb)
-		mei_cl_read_start(device->cl, 0, NULL);
+out:
+	mutex_unlock(&bus->device_lock);
 
 
-	return 0;
+	return ret;
 }
 }
 EXPORT_SYMBOL_GPL(mei_cl_enable_device);
 EXPORT_SYMBOL_GPL(mei_cl_enable_device);
 
 
-int mei_cl_disable_device(struct mei_cl_device *device)
+/**
+ *     disconnect from the me client
+ *     disconnect form the me client
+ *
+ * @cldev: me client device
+ *
+ * Return: 0 on success and < 0 on error
+ */
+int mei_cl_disable_device(struct mei_cl_device *cldev)
 {
 {
+	struct mei_device *bus;
+	struct mei_cl *cl;
 	int err;
 	int err;
-	struct mei_device *dev;
-	struct mei_cl *cl = device->cl;
 
 
-	if (cl == NULL)
+	if (!cldev || !cldev->cl)
 		return -ENODEV;
 		return -ENODEV;
 
 
-	dev = cl->dev;
+	cl = cldev->cl;
 
 
-	device->event_cb = NULL;
+	bus = cldev->bus;
 
 
-	mutex_lock(&dev->device_lock);
+	cldev->event_cb = NULL;
+
+	mutex_lock(&bus->device_lock);
 
 
 	if (!mei_cl_is_connected(cl)) {
 	if (!mei_cl_is_connected(cl)) {
-		dev_err(dev->dev, "Already disconnected");
+		dev_err(bus->dev, "Already disconnected");
 		err = 0;
 		err = 0;
 		goto out;
 		goto out;
 	}
 	}
 
 
 	err = mei_cl_disconnect(cl);
 	err = mei_cl_disconnect(cl);
-	if (err < 0) {
-		dev_err(dev->dev, "Could not disconnect from the ME client");
-		goto out;
-	}
+	if (err < 0)
+		dev_err(bus->dev, "Could not disconnect from the ME client");
 
 
+out:
 	/* Flush queues and remove any pending read */
 	/* Flush queues and remove any pending read */
 	mei_cl_flush_queues(cl, NULL);
 	mei_cl_flush_queues(cl, NULL);
+	mei_cl_unlink(cl);
 
 
-out:
-	mutex_unlock(&dev->device_lock);
-	return err;
+	kfree(cl);
+	cldev->cl = NULL;
 
 
+	mutex_unlock(&bus->device_lock);
+	return err;
 }
 }
 EXPORT_SYMBOL_GPL(mei_cl_disable_device);
 EXPORT_SYMBOL_GPL(mei_cl_disable_device);
 
 
-void mei_cl_bus_rx_event(struct mei_cl *cl)
+/**
+ * mei_cl_device_find - find matching entry in the driver id table
+ *
+ * @cldev: me client device
+ * @cldrv: me client driver
+ *
+ * Return: id on success; NULL if no id is matching
+ */
+static const
+struct mei_cl_device_id *mei_cl_device_find(struct mei_cl_device *cldev,
+					    struct mei_cl_driver *cldrv)
 {
 {
-	struct mei_cl_device *device = cl->device;
+	const struct mei_cl_device_id *id;
+	const uuid_le *uuid;
+
+	uuid = mei_me_cl_uuid(cldev->me_cl);
+
+	id = cldrv->id_table;
+	while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
+		if (!uuid_le_cmp(*uuid, id->uuid)) {
+
+			if (!cldev->name[0])
+				return id;
+
+			if (!strncmp(cldev->name, id->name, sizeof(id->name)))
+				return id;
+		}
+
+		id++;
+	}
+
+	return NULL;
+}
+
+/**
+ * mei_cl_device_match  - device match function
+ *
+ * @dev: device
+ * @drv: driver
+ *
+ * Return:  1 if matching device was found 0 otherwise
+ */
+static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
+{
+	struct mei_cl_device *cldev = to_mei_cl_device(dev);
+	struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
+	const struct mei_cl_device_id *found_id;
+
+	if (!cldev)
+		return 0;
+
+	if (!cldev->do_match)
+		return 0;
+
+	if (!cldrv || !cldrv->id_table)
+		return 0;
+
+	found_id = mei_cl_device_find(cldev, cldrv);
+	if (found_id)
+		return 1;
+
+	return 0;
+}
+
+/**
+ * mei_cl_device_probe - bus probe function
+ *
+ * @dev: device
+ *
+ * Return:  0 on success; < 0 otherwise
+ */
+static int mei_cl_device_probe(struct device *dev)
+{
+	struct mei_cl_device *cldev;
+	struct mei_cl_driver *cldrv;
+	const struct mei_cl_device_id *id;
+
+	cldev = to_mei_cl_device(dev);
+	cldrv = to_mei_cl_driver(dev->driver);
+
+	if (!cldev)
+		return 0;
+
+	if (!cldrv || !cldrv->probe)
+		return -ENODEV;
+
+	id = mei_cl_device_find(cldev, cldrv);
+	if (!id)
+		return -ENODEV;
+
+	__module_get(THIS_MODULE);
+
+	return cldrv->probe(cldev, id);
+}
+
+/**
+ * mei_cl_device_remove - remove device from the bus
+ *
+ * @dev: device
+ *
+ * Return:  0 on success; < 0 otherwise
+ */
+static int mei_cl_device_remove(struct device *dev)
+{
+	struct mei_cl_device *cldev = to_mei_cl_device(dev);
+	struct mei_cl_driver *cldrv;
+	int ret = 0;
+
+	if (!cldev || !dev->driver)
+		return 0;
+
+	if (cldev->event_cb) {
+		cldev->event_cb = NULL;
+		cancel_work_sync(&cldev->event_work);
+	}
+
+	cldrv = to_mei_cl_driver(dev->driver);
+	if (cldrv->remove)
+		ret = cldrv->remove(cldev);
+
+	module_put(THIS_MODULE);
+	dev->driver = NULL;
+	return ret;
+
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *a,
+			     char *buf)
+{
+	struct mei_cl_device *cldev = to_mei_cl_device(dev);
+	size_t len;
+
+	len = snprintf(buf, PAGE_SIZE, "%s", cldev->name);
+
+	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
+			     char *buf)
+{
+	struct mei_cl_device *cldev = to_mei_cl_device(dev);
+	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
+	size_t len;
+
+	len = snprintf(buf, PAGE_SIZE, "%pUl", uuid);
+
+	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(uuid);
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
+			     char *buf)
+{
+	struct mei_cl_device *cldev = to_mei_cl_device(dev);
+	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
+	size_t len;
+
+	len = snprintf(buf, PAGE_SIZE, "mei:%s:" MEI_CL_UUID_FMT ":",
+		cldev->name, MEI_CL_UUID_ARGS(uuid->b));
+
+	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *mei_cl_dev_attrs[] = {
+	&dev_attr_name.attr,
+	&dev_attr_uuid.attr,
+	&dev_attr_modalias.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(mei_cl_dev);
+
+/**
+ * mei_cl_device_uevent - me client bus uevent handler
+ *
+ * @dev: device
+ * @env: uevent kobject
+ *
+ * Return: 0 on success -ENOMEM on when add_uevent_var fails
+ */
+static int mei_cl_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct mei_cl_device *cldev = to_mei_cl_device(dev);
+	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
+
+	if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "MODALIAS=mei:%s:" MEI_CL_UUID_FMT ":",
+		cldev->name, MEI_CL_UUID_ARGS(uuid->b)))
+		return -ENOMEM;
 
 
-	if (!device || !device->event_cb)
+	return 0;
+}
+
+static struct bus_type mei_cl_bus_type = {
+	.name		= "mei",
+	.dev_groups	= mei_cl_dev_groups,
+	.match		= mei_cl_device_match,
+	.probe		= mei_cl_device_probe,
+	.remove		= mei_cl_device_remove,
+	.uevent		= mei_cl_device_uevent,
+};
+
+static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
+{
+	if (bus)
+		get_device(bus->dev);
+
+	return bus;
+}
+
+static void mei_dev_bus_put(struct mei_device *bus)
+{
+	if (bus)
+		put_device(bus->dev);
+}
+
+static void mei_cl_dev_release(struct device *dev)
+{
+	struct mei_cl_device *cldev = to_mei_cl_device(dev);
+
+	if (!cldev)
+		return;
+
+	mei_me_cl_put(cldev->me_cl);
+	mei_dev_bus_put(cldev->bus);
+	kfree(cldev);
+}
+
+static struct device_type mei_cl_device_type = {
+	.release	= mei_cl_dev_release,
+};
+
+/**
+ * mei_cl_dev_alloc - initialize and allocate mei client device
+ *
+ * @bus: mei device
+ * @me_cl: me client
+ *
+ * Return: allocated device structur or NULL on allocation failure
+ */
+static struct mei_cl_device *mei_cl_dev_alloc(struct mei_device *bus,
+					      struct mei_me_client *me_cl)
+{
+	struct mei_cl_device *cldev;
+
+	cldev = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL);
+	if (!cldev)
+		return NULL;
+
+	device_initialize(&cldev->dev);
+	cldev->dev.parent = bus->dev;
+	cldev->dev.bus    = &mei_cl_bus_type;
+	cldev->dev.type   = &mei_cl_device_type;
+	cldev->bus        = mei_dev_bus_get(bus);
+	cldev->me_cl      = mei_me_cl_get(me_cl);
+	cldev->is_added   = 0;
+	INIT_LIST_HEAD(&cldev->bus_list);
+
+	return cldev;
+}
+
+/**
+ * mei_cl_dev_setup - setup me client device
+ *    run fix up routines and set the device name
+ *
+ * @bus: mei device
+ * @cldev: me client device
+ *
+ * Return: true if the device is eligible for enumeration
+ */
+static bool mei_cl_dev_setup(struct mei_device *bus,
+			     struct mei_cl_device *cldev)
+{
+	cldev->do_match = 1;
+	mei_cl_dev_fixup(cldev);
+
+	if (cldev->do_match)
+		dev_set_name(&cldev->dev, "mei:%s:%pUl",
+			     cldev->name, mei_me_cl_uuid(cldev->me_cl));
+
+	return cldev->do_match == 1;
+}
+
+/**
+ * mei_cl_bus_dev_add - add me client devices
+ *
+ * @cldev: me client device
+ *
+ * Return: 0 on success; < 0 on failre
+ */
+static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
+{
+	int ret;
+
+	dev_dbg(cldev->bus->dev, "adding %pUL\n", mei_me_cl_uuid(cldev->me_cl));
+	ret = device_add(&cldev->dev);
+	if (!ret)
+		cldev->is_added = 1;
+
+	return ret;
+}
+
+/**
+ * mei_cl_bus_dev_stop - stop the driver
+ *
+ * @cldev: me client device
+ */
+static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
+{
+	if (cldev->is_added)
+		device_release_driver(&cldev->dev);
+}
+
+/**
+ * mei_cl_bus_dev_destroy - destroy me client devices object
+ *
+ * @cldev: me client device
+ */
+static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
+{
+	if (!cldev->is_added)
+		return;
+
+	device_del(&cldev->dev);
+
+	mutex_lock(&cldev->bus->cl_bus_lock);
+	list_del_init(&cldev->bus_list);
+	mutex_unlock(&cldev->bus->cl_bus_lock);
+
+	cldev->is_added = 0;
+	put_device(&cldev->dev);
+}
+
+/**
+ * mei_cl_bus_remove_device - remove a devices form the bus
+ *
+ * @cldev: me client device
+ */
+static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
+{
+	mei_cl_bus_dev_stop(cldev);
+	mei_cl_bus_dev_destroy(cldev);
+}
+
+/**
+ * mei_cl_bus_remove_devices - remove all devices form the bus
+ *
+ * @bus: mei device
+ */
+void mei_cl_bus_remove_devices(struct mei_device *bus)
+{
+	struct mei_cl_device *cldev, *next;
+
+	list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
+		mei_cl_bus_remove_device(cldev);
+}
+
+
+/**
+ * mei_cl_dev_init - allocate and initializes an mei client devices
+ *     based on me client
+ *
+ * @bus: mei device
+ * @me_cl: me client
+ */
+static void mei_cl_dev_init(struct mei_device *bus, struct mei_me_client *me_cl)
+{
+	struct mei_cl_device *cldev;
+
+	dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));
+
+	if (me_cl->bus_added)
 		return;
 		return;
 
 
-	set_bit(MEI_CL_EVENT_RX, &device->events);
+	cldev = mei_cl_dev_alloc(bus, me_cl);
+	if (!cldev)
+		return;
+
+	mutex_lock(&cldev->bus->cl_bus_lock);
+	me_cl->bus_added = true;
+	list_add_tail(&cldev->bus_list, &bus->device_list);
+	mutex_unlock(&cldev->bus->cl_bus_lock);
+
+}
+
+/**
+ * mei_cl_bus_rescan - scan me clients list and add create
+ *    devices for eligible clients
+ *
+ * @bus: mei device
+ */
+void mei_cl_bus_rescan(struct mei_device *bus)
+{
+	struct mei_cl_device *cldev, *n;
+	struct mei_me_client *me_cl;
+
+	down_read(&bus->me_clients_rwsem);
+	list_for_each_entry(me_cl, &bus->me_clients, list)
+		mei_cl_dev_init(bus, me_cl);
+	up_read(&bus->me_clients_rwsem);
+
+	mutex_lock(&bus->cl_bus_lock);
+	list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {
+
+		if (!mei_me_cl_is_active(cldev->me_cl)) {
+			mei_cl_bus_remove_device(cldev);
+			continue;
+		}
+
+		if (cldev->is_added)
+			continue;
+
+		if (mei_cl_dev_setup(bus, cldev))
+			mei_cl_bus_dev_add(cldev);
+		else {
+			list_del_init(&cldev->bus_list);
+			put_device(&cldev->dev);
+		}
+	}
+	mutex_unlock(&bus->cl_bus_lock);
+
+	dev_dbg(bus->dev, "rescan end");
+}
+
+int __mei_cl_driver_register(struct mei_cl_driver *cldrv, struct module *owner)
+{
+	int err;
+
+	cldrv->driver.name = cldrv->name;
+	cldrv->driver.owner = owner;
+	cldrv->driver.bus = &mei_cl_bus_type;
+
+	err = driver_register(&cldrv->driver);
+	if (err)
+		return err;
 
 
-	schedule_work(&device->event_work);
+	pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);
+
+	return 0;
 }
 }
+EXPORT_SYMBOL_GPL(__mei_cl_driver_register);
+
+void mei_cl_driver_unregister(struct mei_cl_driver *cldrv)
+{
+	driver_unregister(&cldrv->driver);
+
+	pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
+}
+EXPORT_SYMBOL_GPL(mei_cl_driver_unregister);
+
 
 
 int __init mei_cl_bus_init(void)
 int __init mei_cl_bus_init(void)
 {
 {

+ 292 - 41
drivers/misc/mei/client.c

@@ -555,10 +555,10 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
 	init_waitqueue_head(&cl->wait);
 	init_waitqueue_head(&cl->wait);
 	init_waitqueue_head(&cl->rx_wait);
 	init_waitqueue_head(&cl->rx_wait);
 	init_waitqueue_head(&cl->tx_wait);
 	init_waitqueue_head(&cl->tx_wait);
+	init_waitqueue_head(&cl->ev_wait);
 	INIT_LIST_HEAD(&cl->rd_completed);
 	INIT_LIST_HEAD(&cl->rd_completed);
 	INIT_LIST_HEAD(&cl->rd_pending);
 	INIT_LIST_HEAD(&cl->rd_pending);
 	INIT_LIST_HEAD(&cl->link);
 	INIT_LIST_HEAD(&cl->link);
-	INIT_LIST_HEAD(&cl->device_link);
 	cl->writing_state = MEI_IDLE;
 	cl->writing_state = MEI_IDLE;
 	cl->state = MEI_FILE_INITIALIZING;
 	cl->state = MEI_FILE_INITIALIZING;
 	cl->dev = dev;
 	cl->dev = dev;
@@ -690,16 +690,12 @@ void mei_host_client_init(struct work_struct *work)
 		mei_wd_host_init(dev, me_cl);
 		mei_wd_host_init(dev, me_cl);
 	mei_me_cl_put(me_cl);
 	mei_me_cl_put(me_cl);
 
 
-	me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
-	if (me_cl)
-		mei_nfc_host_init(dev, me_cl);
-	mei_me_cl_put(me_cl);
-
-
 	dev->dev_state = MEI_DEV_ENABLED;
 	dev->dev_state = MEI_DEV_ENABLED;
 	dev->reset_count = 0;
 	dev->reset_count = 0;
 	mutex_unlock(&dev->device_lock);
 	mutex_unlock(&dev->device_lock);
 
 
+	mei_cl_bus_rescan(dev);
+
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_mark_last_busy(dev->dev);
 	dev_dbg(dev->dev, "rpm: autosuspend\n");
 	dev_dbg(dev->dev, "rpm: autosuspend\n");
 	pm_runtime_autosuspend(dev->dev);
 	pm_runtime_autosuspend(dev->dev);
@@ -841,45 +837,22 @@ int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
 	return ret;
 	return ret;
 }
 }
 
 
-
-
 /**
 /**
- * mei_cl_disconnect - disconnect host client from the me one
+ * __mei_cl_disconnect - disconnect host client from the me one
+ *     internal function runtime pm has to be already acquired
  *
  *
  * @cl: host client
  * @cl: host client
  *
  *
- * Locking: called under "dev->device_lock" lock
- *
  * Return: 0 on success, <0 on failure.
  * Return: 0 on success, <0 on failure.
  */
  */
-int mei_cl_disconnect(struct mei_cl *cl)
+static int __mei_cl_disconnect(struct mei_cl *cl)
 {
 {
 	struct mei_device *dev;
 	struct mei_device *dev;
 	struct mei_cl_cb *cb;
 	struct mei_cl_cb *cb;
 	int rets;
 	int rets;
 
 
-	if (WARN_ON(!cl || !cl->dev))
-		return -ENODEV;
-
 	dev = cl->dev;
 	dev = cl->dev;
 
 
-	cl_dbg(dev, cl, "disconnecting");
-
-	if (!mei_cl_is_connected(cl))
-		return 0;
-
-	if (mei_cl_is_fixed_address(cl)) {
-		mei_cl_set_disconnected(cl);
-		return 0;
-	}
-
-	rets = pm_runtime_get(dev->dev);
-	if (rets < 0 && rets != -EINPROGRESS) {
-		pm_runtime_put_noidle(dev->dev);
-		cl_err(dev, cl, "rpm: get failed %d\n", rets);
-		return rets;
-	}
-
 	cl->state = MEI_FILE_DISCONNECTING;
 	cl->state = MEI_FILE_DISCONNECTING;
 
 
 	cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL);
 	cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL);
@@ -915,11 +888,52 @@ out:
 	if (!rets)
 	if (!rets)
 		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
 		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
 
 
+	mei_io_cb_free(cb);
+	return rets;
+}
+
+/**
+ * mei_cl_disconnect - disconnect host client from the me one
+ *
+ * @cl: host client
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+int mei_cl_disconnect(struct mei_cl *cl)
+{
+	struct mei_device *dev;
+	int rets;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -ENODEV;
+
+	dev = cl->dev;
+
+	cl_dbg(dev, cl, "disconnecting");
+
+	if (!mei_cl_is_connected(cl))
+		return 0;
+
+	if (mei_cl_is_fixed_address(cl)) {
+		mei_cl_set_disconnected(cl);
+		return 0;
+	}
+
+	rets = pm_runtime_get(dev->dev);
+	if (rets < 0 && rets != -EINPROGRESS) {
+		pm_runtime_put_noidle(dev->dev);
+		cl_err(dev, cl, "rpm: get failed %d\n", rets);
+		return rets;
+	}
+
+	rets = __mei_cl_disconnect(cl);
+
 	cl_dbg(dev, cl, "rpm: autosuspend\n");
 	cl_dbg(dev, cl, "rpm: autosuspend\n");
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
 
 
-	mei_io_cb_free(cb);
 	return rets;
 	return rets;
 }
 }
 
 
@@ -1064,11 +1078,23 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
 	mutex_unlock(&dev->device_lock);
 	mutex_unlock(&dev->device_lock);
 	wait_event_timeout(cl->wait,
 	wait_event_timeout(cl->wait,
 			(cl->state == MEI_FILE_CONNECTED ||
 			(cl->state == MEI_FILE_CONNECTED ||
+			 cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
 			 cl->state == MEI_FILE_DISCONNECT_REPLY),
 			 cl->state == MEI_FILE_DISCONNECT_REPLY),
 			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
 			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
 	mutex_lock(&dev->device_lock);
 	mutex_lock(&dev->device_lock);
 
 
 	if (!mei_cl_is_connected(cl)) {
 	if (!mei_cl_is_connected(cl)) {
+		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
+			mei_io_list_flush(&dev->ctrl_rd_list, cl);
+			mei_io_list_flush(&dev->ctrl_wr_list, cl);
+			 /* ignore disconnect return valuue;
+			  * in case of failure reset will be invoked
+			  */
+			__mei_cl_disconnect(cl);
+			rets = -EFAULT;
+			goto out;
+		}
+
 		/* timeout or something went really wrong */
 		/* timeout or something went really wrong */
 		if (!cl->status)
 		if (!cl->status)
 			cl->status = -EFAULT;
 			cl->status = -EFAULT;
@@ -1180,6 +1206,221 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
 	return 0;
 	return 0;
 }
 }
 
 
+/**
+ *  mei_cl_notify_fop2req - convert fop to proper request
+ *
+ * @fop: client notification start response command
+ *
+ * Return:  MEI_HBM_NOTIFICATION_START/STOP
+ */
+u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
+{
+	if (fop == MEI_FOP_NOTIFY_START)
+		return MEI_HBM_NOTIFICATION_START;
+	else
+		return MEI_HBM_NOTIFICATION_STOP;
+}
+
+/**
+ *  mei_cl_notify_req2fop - convert notification request top file operation type
+ *
+ * @req: hbm notification request type
+ *
+ * Return:  MEI_FOP_NOTIFY_START/STOP
+ */
+enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
+{
+	if (req == MEI_HBM_NOTIFICATION_START)
+		return MEI_FOP_NOTIFY_START;
+	else
+		return MEI_FOP_NOTIFY_STOP;
+}
+
+/**
+ * mei_cl_irq_notify - send notification request in irq_thread context
+ *
+ * @cl: client
+ * @cb: callback block.
+ * @cmpl_list: complete list.
+ *
+ * Return: 0 on such and error otherwise.
+ */
+int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
+		      struct mei_cl_cb *cmpl_list)
+{
+	struct mei_device *dev = cl->dev;
+	u32 msg_slots;
+	int slots;
+	int ret;
+	bool request;
+
+	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
+	slots = mei_hbuf_empty_slots(dev);
+
+	if (slots < msg_slots)
+		return -EMSGSIZE;
+
+	request = mei_cl_notify_fop2req(cb->fop_type);
+	ret = mei_hbm_cl_notify_req(dev, cl, request);
+	if (ret) {
+		cl->status = ret;
+		list_move_tail(&cb->list, &cmpl_list->list);
+		return ret;
+	}
+
+	list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
+	return 0;
+}
+
+/**
+ * mei_cl_notify_request - send notification stop/start request
+ *
+ * @cl: host client
+ * @file: associate request with file
+ * @request: 1 for start or 0 for stop
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * Return: 0 on such and error otherwise.
+ */
+int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request)
+{
+	struct mei_device *dev;
+	struct mei_cl_cb *cb;
+	enum mei_cb_file_ops fop_type;
+	int rets;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -ENODEV;
+
+	dev = cl->dev;
+
+	if (!dev->hbm_f_ev_supported) {
+		cl_dbg(dev, cl, "notifications not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	rets = pm_runtime_get(dev->dev);
+	if (rets < 0 && rets != -EINPROGRESS) {
+		pm_runtime_put_noidle(dev->dev);
+		cl_err(dev, cl, "rpm: get failed %d\n", rets);
+		return rets;
+	}
+
+	fop_type = mei_cl_notify_req2fop(request);
+	cb = mei_io_cb_init(cl, fop_type, file);
+	if (!cb) {
+		rets = -ENOMEM;
+		goto out;
+	}
+
+	if (mei_hbuf_acquire(dev)) {
+		if (mei_hbm_cl_notify_req(dev, cl, request)) {
+			rets = -ENODEV;
+			goto out;
+		}
+		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
+	} else {
+		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+	}
+
+	mutex_unlock(&dev->device_lock);
+	wait_event_timeout(cl->wait, cl->notify_en == request,
+			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+	mutex_lock(&dev->device_lock);
+
+	if (cl->notify_en != request) {
+		mei_io_list_flush(&dev->ctrl_rd_list, cl);
+		mei_io_list_flush(&dev->ctrl_wr_list, cl);
+		if (!cl->status)
+			cl->status = -EFAULT;
+	}
+
+	rets = cl->status;
+
+out:
+	cl_dbg(dev, cl, "rpm: autosuspend\n");
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+
+	mei_io_cb_free(cb);
+	return rets;
+}
+
+/**
+ * mei_cl_notify - raise notification
+ *
+ * @cl: host client
+ *
+ * Locking: called under "dev->device_lock" lock
+ */
+void mei_cl_notify(struct mei_cl *cl)
+{
+	struct mei_device *dev;
+
+	if (!cl || !cl->dev)
+		return;
+
+	dev = cl->dev;
+
+	if (!cl->notify_en)
+		return;
+
+	cl_dbg(dev, cl, "notify event");
+	cl->notify_ev = true;
+	wake_up_interruptible_all(&cl->ev_wait);
+
+	if (cl->ev_async)
+		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
+
+	mei_cl_bus_notify_event(cl);
+}
+
+/**
+ * mei_cl_notify_get - get or wait for notification event
+ *
+ * @cl: host client
+ * @block: this request is blocking
+ * @notify_ev: true if notification event was received
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * Return: 0 on such and error otherwise.
+ */
+int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
+{
+	struct mei_device *dev;
+	int rets;
+
+	*notify_ev = false;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -ENODEV;
+
+	dev = cl->dev;
+
+	if (!mei_cl_is_connected(cl))
+		return -ENODEV;
+
+	if (cl->notify_ev)
+		goto out;
+
+	if (!block)
+		return -EAGAIN;
+
+	mutex_unlock(&dev->device_lock);
+	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
+	mutex_lock(&dev->device_lock);
+
+	if (rets < 0)
+		return rets;
+
+out:
+	*notify_ev = cl->notify_ev;
+	cl->notify_ev = false;
+	return 0;
+}
+
 /**
 /**
  * mei_cl_read_start - the start read client message function.
  * mei_cl_read_start - the start read client message function.
  *
  *
@@ -1356,6 +1597,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
 	struct mei_device *dev;
 	struct mei_device *dev;
 	struct mei_msg_data *buf;
 	struct mei_msg_data *buf;
 	struct mei_msg_hdr mei_hdr;
 	struct mei_msg_hdr mei_hdr;
+	int size;
 	int rets;
 	int rets;
 
 
 
 
@@ -1367,10 +1609,10 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
 
 
 	dev = cl->dev;
 	dev = cl->dev;
 
 
-
 	buf = &cb->buf;
 	buf = &cb->buf;
+	size = buf->size;
 
 
-	cl_dbg(dev, cl, "size=%d\n", buf->size);
+	cl_dbg(dev, cl, "size=%d\n", size);
 
 
 	rets = pm_runtime_get(dev->dev);
 	rets = pm_runtime_get(dev->dev);
 	if (rets < 0 && rets != -EINPROGRESS) {
 	if (rets < 0 && rets != -EINPROGRESS) {
@@ -1394,21 +1636,21 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
 
 
 	if (rets == 0) {
 	if (rets == 0) {
 		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
 		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
-		rets = buf->size;
+		rets = size;
 		goto out;
 		goto out;
 	}
 	}
 	if (!mei_hbuf_acquire(dev)) {
 	if (!mei_hbuf_acquire(dev)) {
 		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
 		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
-		rets = buf->size;
+		rets = size;
 		goto out;
 		goto out;
 	}
 	}
 
 
 	/* Check for a maximum length */
 	/* Check for a maximum length */
-	if (buf->size > mei_hbuf_max_len(dev)) {
+	if (size > mei_hbuf_max_len(dev)) {
 		mei_hdr.length = mei_hbuf_max_len(dev);
 		mei_hdr.length = mei_hbuf_max_len(dev);
 		mei_hdr.msg_complete = 0;
 		mei_hdr.msg_complete = 0;
 	} else {
 	} else {
-		mei_hdr.length = buf->size;
+		mei_hdr.length = size;
 		mei_hdr.msg_complete = 1;
 		mei_hdr.msg_complete = 1;
 	}
 	}
 
 
@@ -1430,6 +1672,7 @@ out:
 	else
 	else
 		list_add_tail(&cb->list, &dev->write_list.list);
 		list_add_tail(&cb->list, &dev->write_list.list);
 
 
+	cb = NULL;
 	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
 	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
 
 
 		mutex_unlock(&dev->device_lock);
 		mutex_unlock(&dev->device_lock);
@@ -1444,7 +1687,7 @@ out:
 		}
 		}
 	}
 	}
 
 
-	rets = buf->size;
+	rets = size;
 err:
 err:
 	cl_dbg(dev, cl, "rpm: autosuspend\n");
 	cl_dbg(dev, cl, "rpm: autosuspend\n");
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_mark_last_busy(dev->dev);
@@ -1486,6 +1729,8 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
 
 
 	case MEI_FOP_CONNECT:
 	case MEI_FOP_CONNECT:
 	case MEI_FOP_DISCONNECT:
 	case MEI_FOP_DISCONNECT:
+	case MEI_FOP_NOTIFY_STOP:
+	case MEI_FOP_NOTIFY_START:
 		if (waitqueue_active(&cl->wait))
 		if (waitqueue_active(&cl->wait))
 			wake_up(&cl->wait);
 			wake_up(&cl->wait);
 
 
@@ -1528,6 +1773,12 @@ void mei_cl_all_wakeup(struct mei_device *dev)
 			cl_dbg(dev, cl, "Waking up writing client!\n");
 			cl_dbg(dev, cl, "Waking up writing client!\n");
 			wake_up_interruptible(&cl->tx_wait);
 			wake_up_interruptible(&cl->tx_wait);
 		}
 		}
+
+		/* synchronized under device mutex */
+		if (waitqueue_active(&cl->ev_wait)) {
+			cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
+			wake_up_interruptible(&cl->ev_wait);
+		}
 	}
 	}
 }
 }
 
 

+ 8 - 0
drivers/misc/mei/client.h

@@ -219,6 +219,14 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
 
 
 void mei_host_client_init(struct work_struct *work);
 void mei_host_client_init(struct work_struct *work);
 
 
+u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop);
+enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request);
+int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request);
+int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
+		      struct mei_cl_cb *cmpl_list);
+int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev);
+void mei_cl_notify(struct mei_cl *cl);
+
 void mei_cl_all_disconnect(struct mei_device *dev);
 void mei_cl_all_disconnect(struct mei_device *dev);
 void mei_cl_all_wakeup(struct mei_device *dev);
 void mei_cl_all_wakeup(struct mei_device *dev);
 void mei_cl_all_write_clear(struct mei_device *dev);
 void mei_cl_all_write_clear(struct mei_device *dev);

+ 6 - 0
drivers/misc/mei/debugfs.c

@@ -154,6 +154,12 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
 		pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n");
 		pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n");
 		pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n",
 		pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n",
 				 dev->hbm_f_pg_supported);
 				 dev->hbm_f_pg_supported);
+		pos += scnprintf(buf + pos, bufsz - pos, "\tDC: %01d\n",
+				 dev->hbm_f_dc_supported);
+		pos += scnprintf(buf + pos, bufsz - pos, "\tDOT: %01d\n",
+				 dev->hbm_f_dot_supported);
+		pos += scnprintf(buf + pos, bufsz - pos, "\tEV: %01d\n",
+				 dev->hbm_f_ev_supported);
 	}
 	}
 
 
 	pos += scnprintf(buf + pos, bufsz - pos, "pg:  %s, %s\n",
 	pos += scnprintf(buf + pos, bufsz - pos, "pg:  %s, %s\n",

+ 314 - 16
drivers/misc/mei/hbm.c

@@ -52,6 +52,7 @@ static const char *mei_cl_conn_status_str(enum mei_cl_connect_status status)
 	MEI_CL_CS(ALREADY_STARTED);
 	MEI_CL_CS(ALREADY_STARTED);
 	MEI_CL_CS(OUT_OF_RESOURCES);
 	MEI_CL_CS(OUT_OF_RESOURCES);
 	MEI_CL_CS(MESSAGE_SMALL);
 	MEI_CL_CS(MESSAGE_SMALL);
+	MEI_CL_CS(NOT_ALLOWED);
 	default: return "unknown";
 	default: return "unknown";
 	}
 	}
 #undef MEI_CL_CCS
 #undef MEI_CL_CCS
@@ -89,6 +90,7 @@ static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status)
 	case MEI_CL_CONN_ALREADY_STARTED:  return -EBUSY;
 	case MEI_CL_CONN_ALREADY_STARTED:  return -EBUSY;
 	case MEI_CL_CONN_OUT_OF_RESOURCES: return -EBUSY;
 	case MEI_CL_CONN_OUT_OF_RESOURCES: return -EBUSY;
 	case MEI_CL_CONN_MESSAGE_SMALL:    return -EINVAL;
 	case MEI_CL_CONN_MESSAGE_SMALL:    return -EINVAL;
+	case MEI_CL_CONN_NOT_ALLOWED:      return -EBUSY;
 	default:                           return -EINVAL;
 	default:                           return -EINVAL;
 	}
 	}
 }
 }
@@ -299,6 +301,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev)
 	enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
 	enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
 	memset(enum_req, 0, len);
 	memset(enum_req, 0, len);
 	enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
 	enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
+	enum_req->allow_add = dev->hbm_f_dc_supported;
 
 
 	ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
 	ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
 	if (ret) {
 	if (ret) {
@@ -343,6 +346,180 @@ static int mei_hbm_me_cl_add(struct mei_device *dev,
 	return 0;
 	return 0;
 }
 }
 
 
+/**
+ * mei_hbm_add_cl_resp - send response to fw on client add request
+ *
+ * @dev: the device structure
+ * @addr: me address
+ * @status: response status
+ *
+ * Return: 0 on success and < 0 on failure
+ */
+static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
+{
+	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+	struct hbm_add_client_response *resp;
+	const size_t len = sizeof(struct hbm_add_client_response);
+	int ret;
+
+	dev_dbg(dev->dev, "adding client response\n");
+
+	resp = (struct hbm_add_client_response *)dev->wr_msg.data;
+
+	mei_hbm_hdr(mei_hdr, len);
+	memset(resp, 0, sizeof(struct hbm_add_client_response));
+
+	resp->hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD;
+	resp->me_addr = addr;
+	resp->status  = status;
+
+	ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+	if (ret)
+		dev_err(dev->dev, "add client response write failed: ret = %d\n",
+			ret);
+	return ret;
+}
+
+/**
+ * mei_hbm_fw_add_cl_req - request from the fw to add a client
+ *
+ * @dev: the device structure
+ * @req: add client request
+ *
+ * Return: 0 on success and < 0 on failure
+ */
+static int mei_hbm_fw_add_cl_req(struct mei_device *dev,
+			      struct hbm_add_client_request *req)
+{
+	int ret;
+	u8 status = MEI_HBMS_SUCCESS;
+
+	BUILD_BUG_ON(sizeof(struct hbm_add_client_request) !=
+			sizeof(struct hbm_props_response));
+
+	ret = mei_hbm_me_cl_add(dev, (struct hbm_props_response *)req);
+	if (ret)
+		status = !MEI_HBMS_SUCCESS;
+
+	return mei_hbm_add_cl_resp(dev, req->me_addr, status);
+}
+
+/**
+ * mei_hbm_cl_notify_req - send notification request
+ *
+ * @dev: the device structure
+ * @cl: a client to disconnect from
+ * @start: true for start false for stop
+ *
+ * Return: 0 on success and -EIO on write failure
+ */
+int mei_hbm_cl_notify_req(struct mei_device *dev,
+			  struct mei_cl *cl, u8 start)
+{
+
+	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+	struct hbm_notification_request *req;
+	const size_t len = sizeof(struct hbm_notification_request);
+	int ret;
+
+	mei_hbm_hdr(mei_hdr, len);
+	mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, dev->wr_msg.data, len);
+
+	req = (struct hbm_notification_request *)dev->wr_msg.data;
+	req->start = start;
+
+	ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+	if (ret)
+		dev_err(dev->dev, "notify request failed: ret = %d\n", ret);
+
+	return ret;
+}
+
+/**
+ *  notify_res_to_fop - convert notification response to the proper
+ *      notification FOP
+ *
+ * @cmd: client notification start response command
+ *
+ * Return:  MEI_FOP_NOTIFY_START or MEI_FOP_NOTIFY_STOP;
+ */
+static inline enum mei_cb_file_ops notify_res_to_fop(struct mei_hbm_cl_cmd *cmd)
+{
+	struct hbm_notification_response *rs =
+		(struct hbm_notification_response *)cmd;
+
+	return mei_cl_notify_req2fop(rs->start);
+}
+
+/**
+ * mei_hbm_cl_notify_start_res - update the client state according
+ *       notify start response
+ *
+ * @dev: the device structure
+ * @cl: mei host client
+ * @cmd: client notification start response command
+ */
+static void mei_hbm_cl_notify_start_res(struct mei_device *dev,
+					struct mei_cl *cl,
+					struct mei_hbm_cl_cmd *cmd)
+{
+	struct hbm_notification_response *rs =
+		(struct hbm_notification_response *)cmd;
+
+	cl_dbg(dev, cl, "hbm: notify start response status=%d\n", rs->status);
+
+	if (rs->status == MEI_HBMS_SUCCESS ||
+	    rs->status == MEI_HBMS_ALREADY_STARTED) {
+		cl->notify_en = true;
+		cl->status = 0;
+	} else {
+		cl->status = -EINVAL;
+	}
+}
+
+/**
+ * mei_hbm_cl_notify_stop_res - update the client state according
+ *       notify stop response
+ *
+ * @dev: the device structure
+ * @cl: mei host client
+ * @cmd: client notification stop response command
+ */
+static void mei_hbm_cl_notify_stop_res(struct mei_device *dev,
+				       struct mei_cl *cl,
+				       struct mei_hbm_cl_cmd *cmd)
+{
+	struct hbm_notification_response *rs =
+		(struct hbm_notification_response *)cmd;
+
+	cl_dbg(dev, cl, "hbm: notify stop response status=%d\n", rs->status);
+
+	if (rs->status == MEI_HBMS_SUCCESS ||
+	    rs->status == MEI_HBMS_NOT_STARTED) {
+		cl->notify_en = false;
+		cl->status = 0;
+	} else {
+		/* TODO: spec is not clear yet about other possible issues */
+		cl->status = -EINVAL;
+	}
+}
+
+/**
+ * mei_hbm_cl_notify - signal notification event
+ *
+ * @dev: the device structure
+ * @cmd: notification client message
+ */
+static void mei_hbm_cl_notify(struct mei_device *dev,
+			      struct mei_hbm_cl_cmd *cmd)
+{
+	struct mei_cl *cl;
+
+	cl = mei_hbm_cl_find_by_cmd(dev, cmd);
+	if (cl)
+		mei_cl_notify(cl);
+}
+
 /**
 /**
  * mei_hbm_prop_req - request property for a single client
  * mei_hbm_prop_req - request property for a single client
  *
  *
@@ -610,8 +787,11 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl,
 
 
 	if (rs->status == MEI_CL_CONN_SUCCESS)
 	if (rs->status == MEI_CL_CONN_SUCCESS)
 		cl->state = MEI_FILE_CONNECTED;
 		cl->state = MEI_FILE_CONNECTED;
-	else
+	else {
 		cl->state = MEI_FILE_DISCONNECT_REPLY;
 		cl->state = MEI_FILE_DISCONNECT_REPLY;
+		if (rs->status == MEI_CL_CONN_NOT_FOUND)
+			mei_me_cl_del(dev, cl->me_cl);
+	}
 	cl->status = mei_cl_conn_status_to_errno(rs->status);
 	cl->status = mei_cl_conn_status_to_errno(rs->status);
 }
 }
 
 
@@ -654,6 +834,12 @@ static void mei_hbm_cl_res(struct mei_device *dev,
 	case MEI_FOP_DISCONNECT:
 	case MEI_FOP_DISCONNECT:
 		mei_hbm_cl_disconnect_res(dev, cl, rs);
 		mei_hbm_cl_disconnect_res(dev, cl, rs);
 		break;
 		break;
+	case MEI_FOP_NOTIFY_START:
+		mei_hbm_cl_notify_start_res(dev, cl, rs);
+		break;
+	case MEI_FOP_NOTIFY_STOP:
+		mei_hbm_cl_notify_stop_res(dev, cl, rs);
+		break;
 	default:
 	default:
 		return;
 		return;
 	}
 	}
@@ -693,6 +879,79 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
 	return 0;
 	return 0;
 }
 }
 
 
+/**
+ * mei_hbm_pg_enter_res - PG enter response received
+ *
+ * @dev: the device structure.
+ *
+ * Return: 0 on success, -EPROTO on state mismatch
+ */
+static int mei_hbm_pg_enter_res(struct mei_device *dev)
+{
+	if (mei_pg_state(dev) != MEI_PG_OFF ||
+	    dev->pg_event != MEI_PG_EVENT_WAIT) {
+		dev_err(dev->dev, "hbm: pg entry response: state mismatch [%s, %d]\n",
+			mei_pg_state_str(mei_pg_state(dev)), dev->pg_event);
+		return -EPROTO;
+	}
+
+	dev->pg_event = MEI_PG_EVENT_RECEIVED;
+	wake_up(&dev->wait_pg);
+
+	return 0;
+}
+
+/**
+ * mei_hbm_pg_resume - process with PG resume
+ *
+ * @dev: the device structure.
+ */
+void mei_hbm_pg_resume(struct mei_device *dev)
+{
+	pm_request_resume(dev->dev);
+}
+EXPORT_SYMBOL_GPL(mei_hbm_pg_resume);
+
+/**
+ * mei_hbm_pg_exit_res - PG exit response received
+ *
+ * @dev: the device structure.
+ *
+ * Return: 0 on success, -EPROTO on state mismatch
+ */
+static int mei_hbm_pg_exit_res(struct mei_device *dev)
+{
+	if (mei_pg_state(dev) != MEI_PG_ON ||
+	    (dev->pg_event != MEI_PG_EVENT_WAIT &&
+	     dev->pg_event != MEI_PG_EVENT_IDLE)) {
+		dev_err(dev->dev, "hbm: pg exit response: state mismatch [%s, %d]\n",
+			mei_pg_state_str(mei_pg_state(dev)), dev->pg_event);
+		return -EPROTO;
+	}
+
+	switch (dev->pg_event) {
+	case MEI_PG_EVENT_WAIT:
+		dev->pg_event = MEI_PG_EVENT_RECEIVED;
+		wake_up(&dev->wait_pg);
+		break;
+	case MEI_PG_EVENT_IDLE:
+		/*
+		* If the driver is not waiting on this then
+		* this is HW initiated exit from PG.
+		* Start runtime pm resume sequence to exit from PG.
+		*/
+		dev->pg_event = MEI_PG_EVENT_RECEIVED;
+		mei_hbm_pg_resume(dev);
+		break;
+	default:
+		WARN(1, "hbm: pg exit response: unexpected pg event = %d\n",
+		     dev->pg_event);
+		return -EPROTO;
+	}
+
+	return 0;
+}
+
 /**
 /**
  * mei_hbm_config_features - check what hbm features and commands
  * mei_hbm_config_features - check what hbm features and commands
  *        are supported by the fw
  *        are supported by the fw
@@ -709,6 +968,17 @@ static void mei_hbm_config_features(struct mei_device *dev)
 	if (dev->version.major_version == HBM_MAJOR_VERSION_PGI &&
 	if (dev->version.major_version == HBM_MAJOR_VERSION_PGI &&
 	    dev->version.minor_version >= HBM_MINOR_VERSION_PGI)
 	    dev->version.minor_version >= HBM_MINOR_VERSION_PGI)
 		dev->hbm_f_pg_supported = 1;
 		dev->hbm_f_pg_supported = 1;
+
+	if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
+		dev->hbm_f_dc_supported = 1;
+
+	/* disconnect on connect timeout instead of link reset */
+	if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT)
+		dev->hbm_f_dot_supported = 1;
+
+	/* Notification Event Support */
+	if (dev->version.major_version >= HBM_MAJOR_VERSION_EV)
+		dev->hbm_f_ev_supported = 1;
 }
 }
 
 
 /**
 /**
@@ -740,6 +1010,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 	struct hbm_host_version_response *version_res;
 	struct hbm_host_version_response *version_res;
 	struct hbm_props_response *props_res;
 	struct hbm_props_response *props_res;
 	struct hbm_host_enum_response *enum_res;
 	struct hbm_host_enum_response *enum_res;
+	struct hbm_add_client_request *add_cl_req;
+	int ret;
 
 
 	struct mei_hbm_cl_cmd *cl_cmd;
 	struct mei_hbm_cl_cmd *cl_cmd;
 	struct hbm_client_connect_request *disconnect_req;
 	struct hbm_client_connect_request *disconnect_req;
@@ -828,24 +1100,17 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 		break;
 		break;
 
 
 	case MEI_PG_ISOLATION_ENTRY_RES_CMD:
 	case MEI_PG_ISOLATION_ENTRY_RES_CMD:
-		dev_dbg(dev->dev, "power gate isolation entry response received\n");
-		dev->pg_event = MEI_PG_EVENT_RECEIVED;
-		if (waitqueue_active(&dev->wait_pg))
-			wake_up(&dev->wait_pg);
+		dev_dbg(dev->dev, "hbm: power gate isolation entry response received\n");
+		ret = mei_hbm_pg_enter_res(dev);
+		if (ret)
+			return ret;
 		break;
 		break;
 
 
 	case MEI_PG_ISOLATION_EXIT_REQ_CMD:
 	case MEI_PG_ISOLATION_EXIT_REQ_CMD:
-		dev_dbg(dev->dev, "power gate isolation exit request received\n");
-		dev->pg_event = MEI_PG_EVENT_RECEIVED;
-		if (waitqueue_active(&dev->wait_pg))
-			wake_up(&dev->wait_pg);
-		else
-			/*
-			* If the driver is not waiting on this then
-			* this is HW initiated exit from PG.
-			* Start runtime pm resume sequence to exit from PG.
-			*/
-			pm_request_resume(dev->dev);
+		dev_dbg(dev->dev, "hbm: power gate isolation exit request received\n");
+		ret = mei_hbm_pg_exit_res(dev);
+		if (ret)
+			return ret;
 		break;
 		break;
 
 
 	case HOST_CLIENT_PROPERTIES_RES_CMD:
 	case HOST_CLIENT_PROPERTIES_RES_CMD:
@@ -937,6 +1202,39 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 			return -EIO;
 			return -EIO;
 		}
 		}
 		break;
 		break;
+
+	case MEI_HBM_ADD_CLIENT_REQ_CMD:
+		dev_dbg(dev->dev, "hbm: add client request received\n");
+		/*
+		 * after the host receives the enum_resp
+		 * message clients may be added or removed
+		 */
+		if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS &&
+		    dev->hbm_state >= MEI_HBM_STOPPED) {
+			dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n",
+				dev->dev_state, dev->hbm_state);
+			return -EPROTO;
+		}
+		add_cl_req = (struct hbm_add_client_request *)mei_msg;
+		ret = mei_hbm_fw_add_cl_req(dev, add_cl_req);
+		if (ret) {
+			dev_err(dev->dev, "hbm: add client: failed to send response %d\n",
+				ret);
+			return -EIO;
+		}
+		dev_dbg(dev->dev, "hbm: add client request processed\n");
+		break;
+
+	case MEI_HBM_NOTIFY_RES_CMD:
+		dev_dbg(dev->dev, "hbm: notify response received\n");
+		mei_hbm_cl_res(dev, cl_cmd, notify_res_to_fop(cl_cmd));
+		break;
+
+	case MEI_HBM_NOTIFICATION_CMD:
+		dev_dbg(dev->dev, "hbm: notification\n");
+		mei_hbm_cl_notify(dev, cl_cmd);
+		break;
+
 	default:
 	default:
 		BUG();
 		BUG();
 		break;
 		break;

+ 3 - 0
drivers/misc/mei/hbm.h

@@ -54,6 +54,9 @@ int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl);
 int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
 int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
 bool mei_hbm_version_is_supported(struct mei_device *dev);
 bool mei_hbm_version_is_supported(struct mei_device *dev);
 int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd);
 int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd);
+void mei_hbm_pg_resume(struct mei_device *dev);
+int mei_hbm_cl_notify_req(struct mei_device *dev,
+			  struct mei_cl *cl, u8 request);
 
 
 #endif /* _MEI_HBM_H_ */
 #endif /* _MEI_HBM_H_ */
 
 

+ 23 - 4
drivers/misc/mei/hw-me-regs.h

@@ -117,12 +117,17 @@
 #define MEI_DEV_ID_WPT_LP     0x9CBA  /* Wildcat Point LP */
 #define MEI_DEV_ID_WPT_LP     0x9CBA  /* Wildcat Point LP */
 #define MEI_DEV_ID_WPT_LP_2   0x9CBB  /* Wildcat Point LP 2 */
 #define MEI_DEV_ID_WPT_LP_2   0x9CBB  /* Wildcat Point LP 2 */
 
 
+#define MEI_DEV_ID_SPT        0x9D3A  /* Sunrise Point */
+#define MEI_DEV_ID_SPT_2      0x9D3B  /* Sunrise Point 2 */
+#define MEI_DEV_ID_SPT_H      0xA13A  /* Sunrise Point H */
+#define MEI_DEV_ID_SPT_H_2    0xA13B  /* Sunrise Point H 2 */
 /*
 /*
  * MEI HW Section
  * MEI HW Section
  */
  */
 
 
 /* Host Firmware Status Registers in PCI Config Space */
 /* Host Firmware Status Registers in PCI Config Space */
 #define PCI_CFG_HFS_1         0x40
 #define PCI_CFG_HFS_1         0x40
+#  define PCI_CFG_HFS_1_D0I3_MSK     0x80000000
 #define PCI_CFG_HFS_2         0x48
 #define PCI_CFG_HFS_2         0x48
 #define PCI_CFG_HFS_3         0x60
 #define PCI_CFG_HFS_3         0x60
 #define PCI_CFG_HFS_4         0x64
 #define PCI_CFG_HFS_4         0x64
@@ -140,7 +145,8 @@
 #define ME_CSR_HA  0xC
 #define ME_CSR_HA  0xC
 /* H_HGC_CSR - PGI register */
 /* H_HGC_CSR - PGI register */
 #define H_HPG_CSR  0x10
 #define H_HPG_CSR  0x10
-
+/* H_D0I3C - D0I3 Control  */
+#define H_D0I3C    0x800
 
 
 /* register bits of H_CSR (Host Control Status register) */
 /* register bits of H_CSR (Host Control Status register) */
 /* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
 /* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
@@ -159,7 +165,14 @@
 #define H_IS              0x00000002
 #define H_IS              0x00000002
 /* Host Interrupt Enable */
 /* Host Interrupt Enable */
 #define H_IE              0x00000001
 #define H_IE              0x00000001
+/* Host D0I3 Interrupt Enable */
+#define H_D0I3C_IE        0x00000020
+/* Host D0I3 Interrupt Status */
+#define H_D0I3C_IS        0x00000040
 
 
+/* H_CSR masks */
+#define H_CSR_IE_MASK     (H_IE | H_D0I3C_IE)
+#define H_CSR_IS_MASK     (H_IS | H_D0I3C_IS)
 
 
 /* register bits of ME_CSR_HA (ME Control Status Host Access register) */
 /* register bits of ME_CSR_HA (ME Control Status Host Access register) */
 /* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
 /* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
@@ -183,8 +196,14 @@ access to ME_CBD */
 #define ME_IE_HRA         0x00000001
 #define ME_IE_HRA         0x00000001
 
 
 
 
-/* register bits - H_HPG_CSR */
-#define H_HPG_CSR_PGIHEXR       0x00000001
-#define H_HPG_CSR_PGI           0x00000002
+/* H_HPG_CSR register bits */
+#define H_HPG_CSR_PGIHEXR 0x00000001
+#define H_HPG_CSR_PGI     0x00000002
+
+/* H_D0I3C register bits */
+#define H_D0I3C_CIP      0x00000001
+#define H_D0I3C_IR       0x00000002
+#define H_D0I3C_I3       0x00000004
+#define H_D0I3C_RR       0x00000008
 
 
 #endif /* _MEI_HW_MEI_REGS_H_ */
 #endif /* _MEI_HW_MEI_REGS_H_ */

+ 425 - 74
drivers/misc/mei/hw-me.c

@@ -134,10 +134,39 @@ static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
  */
  */
 static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
 static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
 {
 {
-	reg &= ~H_IS;
+	reg &= ~H_CSR_IS_MASK;
 	mei_hcsr_write(dev, reg);
 	mei_hcsr_write(dev, reg);
 }
 }
 
 
+/**
+ * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
+ *
+ * @dev: the device structure
+ *
+ * Return: H_D0I3C register value (u32)
+ */
+static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
+{
+	u32 reg;
+
+	reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
+	trace_mei_reg_read(dev->dev, "H_D0I3C", H_CSR, reg);
+
+	return reg;
+}
+
+/**
+ * mei_me_d0i3c_write - writes H_D0I3C register to device
+ *
+ * @dev: the device structure
+ * @reg: new register value
+ */
+static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
+{
+	trace_mei_reg_write(dev->dev, "H_D0I3C", H_CSR, reg);
+	mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
+}
+
 /**
 /**
  * mei_me_fw_status - read fw status register from pci config space
  * mei_me_fw_status - read fw status register from pci config space
  *
  *
@@ -176,12 +205,25 @@ static int mei_me_fw_status(struct mei_device *dev,
  */
  */
 static void mei_me_hw_config(struct mei_device *dev)
 static void mei_me_hw_config(struct mei_device *dev)
 {
 {
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct mei_me_hw *hw = to_me_hw(dev);
 	struct mei_me_hw *hw = to_me_hw(dev);
-	u32 hcsr = mei_hcsr_read(dev);
+	u32 hcsr, reg;
+
 	/* Doesn't change in runtime */
 	/* Doesn't change in runtime */
+	hcsr = mei_hcsr_read(dev);
 	dev->hbuf_depth = (hcsr & H_CBD) >> 24;
 	dev->hbuf_depth = (hcsr & H_CBD) >> 24;
 
 
+	reg = 0;
+	pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+	hw->d0i3_supported =
+		((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
+
 	hw->pg_state = MEI_PG_OFF;
 	hw->pg_state = MEI_PG_OFF;
+	if (hw->d0i3_supported) {
+		reg = mei_me_d0i3c_read(dev);
+		if (reg & H_D0I3C_I3)
+			hw->pg_state = MEI_PG_ON;
+	}
 }
 }
 
 
 /**
 /**
@@ -208,7 +250,7 @@ static void mei_me_intr_clear(struct mei_device *dev)
 {
 {
 	u32 hcsr = mei_hcsr_read(dev);
 	u32 hcsr = mei_hcsr_read(dev);
 
 
-	if ((hcsr & H_IS) == H_IS)
+	if (hcsr & H_CSR_IS_MASK)
 		mei_hcsr_write(dev, hcsr);
 		mei_hcsr_write(dev, hcsr);
 }
 }
 /**
 /**
@@ -220,7 +262,7 @@ static void mei_me_intr_enable(struct mei_device *dev)
 {
 {
 	u32 hcsr = mei_hcsr_read(dev);
 	u32 hcsr = mei_hcsr_read(dev);
 
 
-	hcsr |= H_IE;
+	hcsr |= H_CSR_IE_MASK;
 	mei_hcsr_set(dev, hcsr);
 	mei_hcsr_set(dev, hcsr);
 }
 }
 
 
@@ -233,7 +275,7 @@ static void mei_me_intr_disable(struct mei_device *dev)
 {
 {
 	u32 hcsr = mei_hcsr_read(dev);
 	u32 hcsr = mei_hcsr_read(dev);
 
 
-	hcsr  &= ~H_IE;
+	hcsr  &= ~H_CSR_IE_MASK;
 	mei_hcsr_set(dev, hcsr);
 	mei_hcsr_set(dev, hcsr);
 }
 }
 
 
@@ -253,57 +295,6 @@ static void mei_me_hw_reset_release(struct mei_device *dev)
 	/* complete this write before we set host ready on another CPU */
 	/* complete this write before we set host ready on another CPU */
 	mmiowb();
 	mmiowb();
 }
 }
-/**
- * mei_me_hw_reset - resets fw via mei csr register.
- *
- * @dev: the device structure
- * @intr_enable: if interrupt should be enabled after reset.
- *
- * Return: always 0
- */
-static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
-{
-	u32 hcsr = mei_hcsr_read(dev);
-
-	/* H_RST may be found lit before reset is started,
-	 * for example if preceding reset flow hasn't completed.
-	 * In that case asserting H_RST will be ignored, therefore
-	 * we need to clean H_RST bit to start a successful reset sequence.
-	 */
-	if ((hcsr & H_RST) == H_RST) {
-		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
-		hcsr &= ~H_RST;
-		mei_hcsr_set(dev, hcsr);
-		hcsr = mei_hcsr_read(dev);
-	}
-
-	hcsr |= H_RST | H_IG | H_IS;
-
-	if (intr_enable)
-		hcsr |= H_IE;
-	else
-		hcsr &= ~H_IE;
-
-	dev->recvd_hw_ready = false;
-	mei_hcsr_write(dev, hcsr);
-
-	/*
-	 * Host reads the H_CSR once to ensure that the
-	 * posted write to H_CSR completes.
-	 */
-	hcsr = mei_hcsr_read(dev);
-
-	if ((hcsr & H_RST) == 0)
-		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
-
-	if ((hcsr & H_RDY) == H_RDY)
-		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
-
-	if (intr_enable == false)
-		mei_me_hw_reset_release(dev);
-
-	return 0;
-}
 
 
 /**
 /**
  * mei_me_host_set_ready - enable device
  * mei_me_host_set_ready - enable device
@@ -314,7 +305,7 @@ static void mei_me_host_set_ready(struct mei_device *dev)
 {
 {
 	u32 hcsr = mei_hcsr_read(dev);
 	u32 hcsr = mei_hcsr_read(dev);
 
 
-	hcsr |= H_IE | H_IG | H_RDY;
+	hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
 	mei_hcsr_set(dev, hcsr);
 	mei_hcsr_set(dev, hcsr);
 }
 }
 
 
@@ -601,13 +592,13 @@ static void mei_me_pg_unset(struct mei_device *dev)
 }
 }
 
 
 /**
 /**
- * mei_me_pg_enter_sync - perform pg entry procedure
+ * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
  *
  *
  * @dev: the device structure
  * @dev: the device structure
  *
  *
  * Return: 0 on success an error code otherwise
  * Return: 0 on success an error code otherwise
  */
  */
-int mei_me_pg_enter_sync(struct mei_device *dev)
+static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
 {
 {
 	struct mei_me_hw *hw = to_me_hw(dev);
 	struct mei_me_hw *hw = to_me_hw(dev);
 	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
 	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
@@ -638,13 +629,13 @@ int mei_me_pg_enter_sync(struct mei_device *dev)
 }
 }
 
 
 /**
 /**
- * mei_me_pg_exit_sync - perform pg exit procedure
+ * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
  *
  *
  * @dev: the device structure
  * @dev: the device structure
  *
  *
  * Return: 0 on success an error code otherwise
  * Return: 0 on success an error code otherwise
  */
  */
-int mei_me_pg_exit_sync(struct mei_device *dev)
+static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
 {
 {
 	struct mei_me_hw *hw = to_me_hw(dev);
 	struct mei_me_hw *hw = to_me_hw(dev);
 	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
 	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
@@ -712,8 +703,12 @@ static bool mei_me_pg_in_transition(struct mei_device *dev)
  */
  */
 static bool mei_me_pg_is_enabled(struct mei_device *dev)
 static bool mei_me_pg_is_enabled(struct mei_device *dev)
 {
 {
+	struct mei_me_hw *hw = to_me_hw(dev);
 	u32 reg = mei_me_mecsr_read(dev);
 	u32 reg = mei_me_mecsr_read(dev);
 
 
+	if (hw->d0i3_supported)
+		return true;
+
 	if ((reg & ME_PGIC_HRA) == 0)
 	if ((reg & ME_PGIC_HRA) == 0)
 		goto notsupported;
 		goto notsupported;
 
 
@@ -723,7 +718,8 @@ static bool mei_me_pg_is_enabled(struct mei_device *dev)
 	return true;
 	return true;
 
 
 notsupported:
 notsupported:
-	dev_dbg(dev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n",
+	dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
+		hw->d0i3_supported,
 		!!(reg & ME_PGIC_HRA),
 		!!(reg & ME_PGIC_HRA),
 		dev->version.major_version,
 		dev->version.major_version,
 		dev->version.minor_version,
 		dev->version.minor_version,
@@ -734,11 +730,211 @@ notsupported:
 }
 }
 
 
 /**
 /**
- * mei_me_pg_intr - perform pg processing in interrupt thread handler
+ * mei_me_d0i3_set - write d0i3 register bit on mei device.
  *
  *
  * @dev: the device structure
  * @dev: the device structure
+ * @intr: ask for interrupt
+ *
+ * Return: D0I3C register value
  */
  */
-static void mei_me_pg_intr(struct mei_device *dev)
+static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
+{
+	u32 reg = mei_me_d0i3c_read(dev);
+
+	reg |= H_D0I3C_I3;
+	if (intr)
+		reg |= H_D0I3C_IR;
+	else
+		reg &= ~H_D0I3C_IR;
+	mei_me_d0i3c_write(dev, reg);
+	/* read it to ensure HW consistency */
+	reg = mei_me_d0i3c_read(dev);
+	return reg;
+}
+
+/**
+ * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
+ *
+ * @dev: the device structure
+ *
+ * Return: D0I3C register value
+ */
+static u32 mei_me_d0i3_unset(struct mei_device *dev)
+{
+	u32 reg = mei_me_d0i3c_read(dev);
+
+	reg &= ~H_D0I3C_I3;
+	reg |= H_D0I3C_IR;
+	mei_me_d0i3c_write(dev, reg);
+	/* read it to ensure HW consistency */
+	reg = mei_me_d0i3c_read(dev);
+	return reg;
+}
+
+/**
+ * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success an error code otherwise
+ */
+static int mei_me_d0i3_enter_sync(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
+	unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
+	int ret;
+	u32 reg;
+
+	reg = mei_me_d0i3c_read(dev);
+	if (reg & H_D0I3C_I3) {
+		/* we are in d0i3, nothing to do */
+		dev_dbg(dev->dev, "d0i3 set not needed\n");
+		ret = 0;
+		goto on;
+	}
+
+	/* PGI entry procedure */
+	dev->pg_event = MEI_PG_EVENT_WAIT;
+
+	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
+	if (ret)
+		/* FIXME: should we reset here? */
+		goto out;
+
+	mutex_unlock(&dev->device_lock);
+	wait_event_timeout(dev->wait_pg,
+		dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
+	mutex_lock(&dev->device_lock);
+
+	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
+		ret = -ETIME;
+		goto out;
+	}
+	/* end PGI entry procedure */
+
+	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
+
+	reg = mei_me_d0i3_set(dev, true);
+	if (!(reg & H_D0I3C_CIP)) {
+		dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
+		ret = 0;
+		goto on;
+	}
+
+	mutex_unlock(&dev->device_lock);
+	wait_event_timeout(dev->wait_pg,
+		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
+	mutex_lock(&dev->device_lock);
+
+	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
+		reg = mei_me_d0i3c_read(dev);
+		if (!(reg & H_D0I3C_I3)) {
+			ret = -ETIME;
+			goto out;
+		}
+	}
+
+	ret = 0;
+on:
+	hw->pg_state = MEI_PG_ON;
+out:
+	dev->pg_event = MEI_PG_EVENT_IDLE;
+	dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
+	return ret;
+}
+
+/**
+ * mei_me_d0i3_enter - perform d0i3 entry procedure
+ *   no hbm PG handshake
+ *   no waiting for confirmation; runs with interrupts
+ *   disabled
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success an error code otherwise
+ */
+static int mei_me_d0i3_enter(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	u32 reg;
+
+	reg = mei_me_d0i3c_read(dev);
+	if (reg & H_D0I3C_I3) {
+		/* we are in d0i3, nothing to do */
+		dev_dbg(dev->dev, "already d0i3 : set not needed\n");
+		goto on;
+	}
+
+	mei_me_d0i3_set(dev, false);
+on:
+	hw->pg_state = MEI_PG_ON;
+	dev->pg_event = MEI_PG_EVENT_IDLE;
+	dev_dbg(dev->dev, "d0i3 enter\n");
+	return 0;
+}
+
+/**
+ * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success an error code otherwise
+ */
+static int mei_me_d0i3_exit_sync(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
+	int ret;
+	u32 reg;
+
+	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
+
+	reg = mei_me_d0i3c_read(dev);
+	if (!(reg & H_D0I3C_I3)) {
+		/* we are not in d0i3, nothing to do */
+		dev_dbg(dev->dev, "d0i3 exit not needed\n");
+		ret = 0;
+		goto off;
+	}
+
+	reg = mei_me_d0i3_unset(dev);
+	if (!(reg & H_D0I3C_CIP)) {
+		dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
+		ret = 0;
+		goto off;
+	}
+
+	mutex_unlock(&dev->device_lock);
+	wait_event_timeout(dev->wait_pg,
+		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
+	mutex_lock(&dev->device_lock);
+
+	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
+		reg = mei_me_d0i3c_read(dev);
+		if (reg & H_D0I3C_I3) {
+			ret = -ETIME;
+			goto out;
+		}
+	}
+
+	ret = 0;
+off:
+	hw->pg_state = MEI_PG_OFF;
+out:
+	dev->pg_event = MEI_PG_EVENT_IDLE;
+
+	dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
+	return ret;
+}
+
+/**
+ * mei_me_pg_legacy_intr - perform legacy pg processing
+ *			   in interrupt thread handler
+ *
+ * @dev: the device structure
+ */
+static void mei_me_pg_legacy_intr(struct mei_device *dev)
 {
 {
 	struct mei_me_hw *hw = to_me_hw(dev);
 	struct mei_me_hw *hw = to_me_hw(dev);
 
 
@@ -751,6 +947,162 @@ static void mei_me_pg_intr(struct mei_device *dev)
 		wake_up(&dev->wait_pg);
 		wake_up(&dev->wait_pg);
 }
 }
 
 
+/**
+ * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
+ *
+ * @dev: the device structure
+ */
+static void mei_me_d0i3_intr(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+
+	if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
+	    (hw->intr_source & H_D0I3C_IS)) {
+		dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
+		if (hw->pg_state == MEI_PG_ON) {
+			hw->pg_state = MEI_PG_OFF;
+			if (dev->hbm_state != MEI_HBM_IDLE) {
+				/*
+				 * force H_RDY because it could be
+				 * wiped off during PG
+				 */
+				dev_dbg(dev->dev, "d0i3 set host ready\n");
+				mei_me_host_set_ready(dev);
+			}
+		} else {
+			hw->pg_state = MEI_PG_ON;
+		}
+
+		wake_up(&dev->wait_pg);
+	}
+
+	if (hw->pg_state == MEI_PG_ON && (hw->intr_source & H_IS)) {
+		/*
+		 * HW sent some data and we are in D0i3, so
+		 * we got here because of HW initiated exit from D0i3.
+		 * Start runtime pm resume sequence to exit low power state.
+		 */
+		dev_dbg(dev->dev, "d0i3 want resume\n");
+		mei_hbm_pg_resume(dev);
+	}
+}
+
+/**
+ * mei_me_pg_intr - perform pg processing in interrupt thread handler
+ *
+ * @dev: the device structure
+ */
+static void mei_me_pg_intr(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+
+	if (hw->d0i3_supported)
+		mei_me_d0i3_intr(dev);
+	else
+		mei_me_pg_legacy_intr(dev);
+}
+
+/**
+ * mei_me_pg_enter_sync - perform runtime pm entry procedure
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success an error code otherwise
+ */
+int mei_me_pg_enter_sync(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+
+	if (hw->d0i3_supported)
+		return mei_me_d0i3_enter_sync(dev);
+	else
+		return mei_me_pg_legacy_enter_sync(dev);
+}
+
+/**
+ * mei_me_pg_exit_sync - perform runtime pm exit procedure
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success an error code otherwise
+ */
+int mei_me_pg_exit_sync(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+
+	if (hw->d0i3_supported)
+		return mei_me_d0i3_exit_sync(dev);
+	else
+		return mei_me_pg_legacy_exit_sync(dev);
+}
+
+/**
+ * mei_me_hw_reset - resets fw via mei csr register.
+ *
+ * @dev: the device structure
+ * @intr_enable: if interrupt should be enabled after reset.
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	int ret;
+	u32 hcsr;
+
+	if (intr_enable) {
+		mei_me_intr_enable(dev);
+		if (hw->d0i3_supported) {
+			ret = mei_me_d0i3_exit_sync(dev);
+			if (ret)
+				return ret;
+		}
+	}
+
+	hcsr = mei_hcsr_read(dev);
+	/* H_RST may be found lit before reset is started,
+	 * for example if preceding reset flow hasn't completed.
+	 * In that case asserting H_RST will be ignored, therefore
+	 * we need to clear the H_RST bit to start a successful reset sequence.
+	 */
+	if ((hcsr & H_RST) == H_RST) {
+		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
+		hcsr &= ~H_RST;
+		mei_hcsr_set(dev, hcsr);
+		hcsr = mei_hcsr_read(dev);
+	}
+
+	hcsr |= H_RST | H_IG | H_CSR_IS_MASK;
+
+	if (!intr_enable)
+		hcsr &= ~H_CSR_IE_MASK;
+
+	dev->recvd_hw_ready = false;
+	mei_hcsr_write(dev, hcsr);
+
+	/*
+	 * Host reads the H_CSR once to ensure that the
+	 * posted write to H_CSR completes.
+	 */
+	hcsr = mei_hcsr_read(dev);
+
+	if ((hcsr & H_RST) == 0)
+		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
+
+	if ((hcsr & H_RDY) == H_RDY)
+		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
+
+	if (!intr_enable) {
+		mei_me_hw_reset_release(dev);
+		if (hw->d0i3_supported) {
+			ret = mei_me_d0i3_enter(dev);
+			if (ret)
+				return ret;
+		}
+	}
+	return 0;
+}
+
 /**
  * mei_me_irq_quick_handler - The ISR of the MEI device
  *
@@ -759,16 +1111,20 @@ static void mei_me_pg_intr(struct mei_device *dev)
  *
  * Return: irqreturn_t
  */
-
 irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
 {
-	struct mei_device *dev = (struct mei_device *) dev_id;
-	u32 hcsr = mei_hcsr_read(dev);
+	struct mei_device *dev = (struct mei_device *)dev_id;
+	struct mei_me_hw *hw = to_me_hw(dev);
+	u32 hcsr;
 
-	if ((hcsr & H_IS) != H_IS)
+	hcsr = mei_hcsr_read(dev);
+	if (!(hcsr & H_CSR_IS_MASK))
 		return IRQ_NONE;
 
-	/* clear H_IS bit in H_CSR */
+	hw->intr_source = hcsr & H_CSR_IS_MASK;
+	dev_dbg(dev->dev, "interrupt source 0x%08X.\n", hw->intr_source);
+
+	/* clear H_IS and H_D0I3C_IS bits in H_CSR to clear the interrupts */
 	mei_hcsr_write(dev, hcsr);
 
 	return IRQ_WAKE_THREAD;
@@ -796,11 +1152,6 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
 	mutex_lock(&dev->device_lock);
 	mei_io_list_init(&complete_list);
 
-	/* Ack the interrupt here
-	 * In case of MSI we don't go through the quick handler */
-	if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
-		mei_clear_interrupts(dev);
-
 	/* check if ME wants a reset */
 	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
 		dev_warn(dev->dev, "FW not ready: resetting.\n");
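The D0i3 enter/exit helpers added above all follow the same shape: arm a pg_event flag while holding the device lock, kick the hardware, drop the lock so the threaded ISR can record the completion, wait with a timeout, then re-take the lock and re-check state. The following is a minimal, self-contained sketch of that pattern only; every name in it is hypothetical, and only mutex_lock()/mutex_unlock() and wait_event_timeout() are real kernel APIs.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#define PG_EVENT_IDLE     0
#define PG_EVENT_WAIT     1
#define PG_EVENT_RECEIVED 2

struct pg_ctx {
	struct mutex lock;        /* serializes power-gating state changes */
	wait_queue_head_t waitq;  /* woken by the interrupt thread */
	int event;                /* WAIT -> RECEIVED on interrupt */
};

/* called with ctx->lock held, mirroring the d0i3 sync helpers */
static int pg_wait_for_intr(struct pg_ctx *ctx, unsigned long timeout)
{
	ctx->event = PG_EVENT_WAIT;
	/* ... write the register that starts the transition here ... */

	mutex_unlock(&ctx->lock);  /* let the ISR thread set RECEIVED */
	wait_event_timeout(ctx->waitq, ctx->event == PG_EVENT_RECEIVED,
			   timeout);
	mutex_lock(&ctx->lock);    /* re-take before touching shared state */

	if (ctx->event != PG_EVENT_RECEIVED)
		return -ETIME;     /* real code re-reads the register first */

	ctx->event = PG_EVENT_IDLE;
	return 0;
}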

+ 6 - 2
drivers/misc/mei/hw-me.h

@@ -50,13 +50,17 @@ struct mei_cfg {
  * struct mei_me_hw - me hw specific data
  *
  * @cfg: per device generation config and ops
- * @mem_addr:  io memory address
- * @pg_state:      power gating state
+ * @mem_addr: io memory address
+ * @intr_source: interrupt source
+ * @pg_state: power gating state
+ * @d0i3_supported: d0i3 support
  */
 struct mei_me_hw {
 	const struct mei_cfg *cfg;
 	void __iomem *mem_addr;
+	u32 intr_source;
 	enum mei_pg_state pg_state;
+	bool d0i3_supported;
 };
 
 #define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)

+ 129 - 5
drivers/misc/mei/hw.h

@@ -31,14 +31,15 @@
 #define MEI_IAMTHIF_STALL_TIMER    12  /* HPS */
 #define MEI_IAMTHIF_READ_TIMER     10  /* HPS */
 
-#define MEI_PGI_TIMEOUT            1  /* PG Isolation time response 1 sec */
-#define MEI_HBM_TIMEOUT            1   /* 1 second */
+#define MEI_PGI_TIMEOUT             1  /* PG Isolation time response 1 sec */
+#define MEI_D0I3_TIMEOUT            5  /* D0i3 set/unset max response time */
+#define MEI_HBM_TIMEOUT             1  /* 1 second */
 
 /*
  * MEI Version
  */
-#define HBM_MINOR_VERSION                   1
-#define HBM_MAJOR_VERSION                   1
+#define HBM_MINOR_VERSION                   0
+#define HBM_MAJOR_VERSION                   2
 
 /*
  * MEI version with PGI support
@@ -46,6 +47,24 @@
 #define HBM_MINOR_VERSION_PGI               1
 #define HBM_MAJOR_VERSION_PGI               1
 
+/*
+ * MEI version with Dynamic clients support
+ */
+#define HBM_MINOR_VERSION_DC               0
+#define HBM_MAJOR_VERSION_DC               2
+
+/*
+ * MEI version with disconnect on connection timeout support
+ */
+#define HBM_MINOR_VERSION_DOT              0
+#define HBM_MAJOR_VERSION_DOT              2
+
+/*
+ * MEI version with notification support
+ */
+#define HBM_MINOR_VERSION_EV               0
+#define HBM_MAJOR_VERSION_EV               2
+
 /* Host bus message command opcode */
 #define MEI_HBM_CMD_OP_MSK                  0x7f
 /* Host bus message command RESPONSE */
@@ -81,6 +100,13 @@
 #define MEI_PG_ISOLATION_EXIT_REQ_CMD       0x0b
 #define MEI_PG_ISOLATION_EXIT_RES_CMD       0x8b
 
+#define MEI_HBM_ADD_CLIENT_REQ_CMD          0x0f
+#define MEI_HBM_ADD_CLIENT_RES_CMD          0x8f
+
+#define MEI_HBM_NOTIFY_REQ_CMD              0x10
+#define MEI_HBM_NOTIFY_RES_CMD              0x90
+#define MEI_HBM_NOTIFICATION_CMD            0x11
+
 /*
  * MEI Stop Reason
  * used by hbm_host_stop_request.reason
@@ -136,6 +162,7 @@ enum mei_cl_connect_status {
 	MEI_CL_CONN_ALREADY_STARTED  = MEI_HBMS_ALREADY_EXISTS,
 	MEI_CL_CONN_OUT_OF_RESOURCES = MEI_HBMS_REJECTED,
 	MEI_CL_CONN_MESSAGE_SMALL    = MEI_HBMS_INVALID_PARAMETER,
+	MEI_CL_CONN_NOT_ALLOWED      = MEI_HBMS_NOT_ALLOWED,
 };
 
 /*
@@ -213,9 +240,17 @@ struct hbm_me_stop_request {
 	u8 reserved[2];
 } __packed;
 
+/**
+ * struct hbm_host_enum_request -  enumeration request from host to fw
+ *
+ * @hbm_cmd: bus message command header
+ * @allow_add: allow dynamic clients to be added (HBM version >= 2.0)
+ * @reserved: reserved
+ */
 struct hbm_host_enum_request {
 	u8 hbm_cmd;
-	u8 reserved[3];
+	u8 allow_add;
+	u8 reserved[2];
 } __packed;
 
 struct hbm_host_enum_response {
@@ -247,6 +282,38 @@ struct hbm_props_response {
 	struct mei_client_properties client_properties;
 } __packed;
 
+/**
+ * struct hbm_add_client_request - request to add a client
+ *     might be sent by fw after enumeration has already completed
+ *
+ * @hbm_cmd: bus message command header
+ * @me_addr: address of the client in ME
+ * @reserved: reserved
+ * @client_properties: client properties
+ */
+struct hbm_add_client_request {
+	u8 hbm_cmd;
+	u8 me_addr;
+	u8 reserved[2];
+	struct mei_client_properties client_properties;
+} __packed;
+
+/**
+ * struct hbm_add_client_response - response to add a client
+ *     sent by the host to report client addition status to fw
+ *
+ * @hbm_cmd: bus message command header
+ * @me_addr: address of the client in ME
+ * @status: if HBMS_SUCCESS then the client can now accept connections.
+ * @reserved: reserved
+ */
+struct hbm_add_client_response {
+	u8 hbm_cmd;
+	u8 me_addr;
+	u8 status;
+	u8 reserved[1];
+} __packed;
+
 /**
  * struct hbm_power_gate - power gate request/response
  *
@@ -298,5 +365,62 @@ struct hbm_flow_control {
 	u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH];
 } __packed;
 
+#define MEI_HBM_NOTIFICATION_START 1
+#define MEI_HBM_NOTIFICATION_STOP  0
+/**
+ * struct hbm_notification_request - start/stop notification request
+ *
+ * @hbm_cmd: bus message command header
+ * @me_addr: address of the client in ME
+ * @host_addr: address of the client in the driver
+ * @start:  start = 1 or stop = 0 asynchronous notifications
+ */
+struct hbm_notification_request {
+	u8 hbm_cmd;
+	u8 me_addr;
+	u8 host_addr;
+	u8 start;
+} __packed;
+
+/**
+ * struct hbm_notification_response - start/stop notification response
+ *
+ * @hbm_cmd: bus message command header
+ * @me_addr: address of the client in ME
+ * @host_addr: address of the client in the driver
+ * @status: (mei_hbm_status) response status for the request
+ *  - MEI_HBMS_SUCCESS: successful stop/start
+ *  - MEI_HBMS_CLIENT_NOT_FOUND: if the connection could not be found.
+ *  - MEI_HBMS_ALREADY_STARTED: for start requests for a previously
+ *                         started notification.
+ *  - MEI_HBMS_NOT_STARTED: for stop request for a connected client for whom
+ *                         asynchronous notifications are currently disabled.
+ *
+ * @start:  start = 1 or stop = 0 asynchronous notifications
+ * @reserved: reserved
+ */
+struct hbm_notification_response {
+	u8 hbm_cmd;
+	u8 me_addr;
+	u8 host_addr;
+	u8 status;
+	u8 start;
+	u8 reserved[3];
+} __packed;
+
+/**
+ * struct hbm_notification - notification event
+ *
+ * @hbm_cmd: bus message command header
+ * @me_addr:  address of the client in ME
+ * @host_addr:  address of the client in the driver
+ * @reserved: reserved for alignment
+ */
+struct hbm_notification {
+	u8 hbm_cmd;
+	u8 me_addr;
+	u8 host_addr;
+	u8 reserved[1];
+} __packed;
 
 
 #endif
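The *_DC/_DOT/_EV define pairs introduced above exist so the host can gate each new flow on the HBM version negotiated at start-up. A rough illustration of that check follows; the helper and the version struct here are invented for the example and are not the driver's actual API.

#include <linux/types.h>

struct hbm_ver_sketch {		/* stand-in for the negotiated HBM version */
	u8 minor_version;
	u8 major_version;
};

static inline bool hbm_ver_at_least(const struct hbm_ver_sketch *v,
				    u8 major, u8 minor)
{
	/* feature available when negotiated version >= (major, minor) */
	return v->major_version > major ||
	       (v->major_version == major && v->minor_version >= minor);
}

/*
 * e.g. event notification support:
 *   ev_ok = hbm_ver_at_least(&ver, HBM_MAJOR_VERSION_EV,
 *                            HBM_MINOR_VERSION_EV);
 */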

+ 2 - 1
drivers/misc/mei/init.c

@@ -331,7 +331,7 @@ void mei_stop(struct mei_device *dev)
 
 	mei_cancel_work(dev);
 
-	mei_nfc_host_exit(dev);
+	mei_cl_bus_remove_devices(dev);
 
 	mutex_lock(&dev->device_lock);
 
@@ -390,6 +390,7 @@ void mei_device_init(struct mei_device *dev,
 	INIT_LIST_HEAD(&dev->me_clients);
 	mutex_init(&dev->device_lock);
 	init_rwsem(&dev->me_clients_rwsem);
+	mutex_init(&dev->cl_bus_lock);
 	init_waitqueue_head(&dev->wait_hw_ready);
 	init_waitqueue_head(&dev->wait_pg);
 	init_waitqueue_head(&dev->wait_hbm_start);

+ 26 - 1
drivers/misc/mei/interrupt.c

@@ -403,6 +403,13 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
 			if (ret)
 				return ret;
 			break;
+
+		case MEI_FOP_NOTIFY_START:
+		case MEI_FOP_NOTIFY_STOP:
+			ret = mei_cl_irq_notify(cl, cb, cmpl_list);
+			if (ret)
+				return ret;
+			break;
 		default:
 			BUG();
 		}
@@ -424,6 +431,24 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
 EXPORT_SYMBOL_GPL(mei_irq_write_handler);
 
 
+/**
+ * mei_connect_timeout  - connect/disconnect timeouts
+ *
+ * @cl: host client
+ */
+static void mei_connect_timeout(struct mei_cl *cl)
+{
+	struct mei_device *dev = cl->dev;
+
+	if (cl->state == MEI_FILE_CONNECTING) {
+		if (dev->hbm_f_dot_supported) {
+			cl->state = MEI_FILE_DISCONNECT_REQUIRED;
+			wake_up(&cl->wait);
+			return;
+		}
+	}
+	mei_reset(dev);
+}
 
 /**
  * mei_timer - timer function.
@@ -464,7 +489,7 @@ void mei_timer(struct work_struct *work)
 		if (cl->timer_count) {
 			if (--cl->timer_count == 0) {
 				dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
-				mei_reset(dev);
+				mei_connect_timeout(cl);
 				goto out;
 			}
 		}

+ 96 - 0
drivers/misc/mei/main.c

@@ -445,6 +445,45 @@ end:
 	return rets;
 }
 
+/**
+ * mei_ioctl_client_notify_request -
+ *     propagate event notification request to client
+ *
+ * @file: pointer to file structure
+ * @request: 0 - disable, 1 - enable
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int mei_ioctl_client_notify_request(struct file *file, u32 request)
+{
+	struct mei_cl *cl = file->private_data;
+
+	return mei_cl_notify_request(cl, file, request);
+}
+
+/**
+ * mei_ioctl_client_notify_get -  wait for notification request
+ *
+ * @file: pointer to file structure
+ * @notify_get: 0 - disable, 1 - enable
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int mei_ioctl_client_notify_get(struct file *file, u32 *notify_get)
+{
+	struct mei_cl *cl = file->private_data;
+	bool notify_ev;
+	bool block = (file->f_flags & O_NONBLOCK) == 0;
+	int rets;
+
+	rets = mei_cl_notify_get(cl, block, &notify_ev);
+	if (rets)
+		return rets;
+
+	*notify_get = notify_ev ? 1 : 0;
+	return 0;
+}
+
 /**
  * mei_ioctl - the IOCTL function
  *
@@ -459,6 +498,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
 	struct mei_device *dev;
 	struct mei_cl *cl = file->private_data;
 	struct mei_connect_client_data connect_data;
+	u32 notify_get, notify_req;
 	int rets;
 
 
@@ -499,6 +539,33 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
 
 		break;
 
+	case IOCTL_MEI_NOTIFY_SET:
+		dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_SET.\n");
+		if (copy_from_user(&notify_req,
+				   (char __user *)data, sizeof(notify_req))) {
+			dev_dbg(dev->dev, "failed to copy data from userland\n");
+			rets = -EFAULT;
+			goto out;
+		}
+		rets = mei_ioctl_client_notify_request(file, notify_req);
+		break;
+
+	case IOCTL_MEI_NOTIFY_GET:
+		dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_GET.\n");
+		rets = mei_ioctl_client_notify_get(file, &notify_get);
+		if (rets)
+			goto out;
+
+		dev_dbg(dev->dev, "copy connect data to user\n");
+		if (copy_to_user((char __user *)data,
+				&notify_get, sizeof(notify_get))) {
+			dev_dbg(dev->dev, "failed to copy data to userland\n");
+			rets = -EFAULT;
+			goto out;
+
+		}
+		break;
+
 	default:
 		dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd);
 		rets = -ENOIOCTLCMD;
@@ -541,6 +608,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
 	struct mei_cl *cl = file->private_data;
 	struct mei_device *dev;
 	unsigned int mask = 0;
+	bool notify_en;
 
 	if (WARN_ON(!cl || !cl->dev))
 		return POLLERR;
@@ -549,6 +617,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
 
 	mutex_lock(&dev->device_lock);
 
+	notify_en = cl->notify_en && (req_events & POLLPRI);
 
 	if (dev->dev_state != MEI_DEV_ENABLED ||
 	    !mei_cl_is_connected(cl)) {
@@ -561,6 +630,12 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
 		goto out;
 	}
 
+	if (notify_en) {
+		poll_wait(file, &cl->ev_wait, wait);
+		if (cl->notify_ev)
+			mask |= POLLPRI;
+	}
+
 	if (req_events & (POLLIN | POLLRDNORM)) {
 		poll_wait(file, &cl->rx_wait, wait);
 
@@ -575,6 +650,26 @@ out:
 	return mask;
 }
 
+/**
+ * mei_fasync - asynchronous io support
+ *
+ * @fd: file descriptor
+ * @file: pointer to file structure
+ * @band: band bitmap
+ *
+ * Return: poll mask
+ */
+static int mei_fasync(int fd, struct file *file, int band)
+{
+
+	struct mei_cl *cl = file->private_data;
+
+	if (!mei_cl_is_connected(cl))
+		return POLLERR;
+
+	return fasync_helper(fd, file, band, &cl->ev_async);
+}
+
 /**
  * fw_status_show - mei device attribute show method
  *
@@ -627,6 +722,7 @@ static const struct file_operations mei_fops = {
 	.release = mei_release,
 	.write = mei_write,
 	.poll = mei_poll,
+	.fasync = mei_fasync,
 	.llseek = no_llseek
 };
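Taken together, the new ioctls, the POLLPRI path in mei_poll() and mei_fasync() give user space an event channel per connected client. Below is a hedged sketch of how an application might use it; the timeout is a placeholder, and it assumes <linux/mei.h> exports IOCTL_MEI_NOTIFY_SET/IOCTL_MEI_NOTIFY_GET and that fd was opened on the MEI character device and already connected with IOCTL_MEI_CONNECT_CLIENT.

#include <poll.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/mei.h>

static int wait_for_mei_event(int fd)
{
	unsigned int enable = 1, event = 0;
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };

	/* arm asynchronous notifications for this connection */
	if (ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable) < 0)
		return -1;	/* e.g. older kernel or FW without EV support */

	/* block until the FW client signals an event (5 s timeout) */
	if (poll(&pfd, 1, 5000) <= 0 || !(pfd.revents & POLLPRI))
		return -1;

	/* consume the pending event */
	if (ioctl(fd, IOCTL_MEI_NOTIFY_GET, &event) < 0)
		return -1;
	printf("notification received: %u\n", event);

	enable = 0;	/* disarm when done */
	return ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable);
}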
 
 

+ 30 - 17
drivers/misc/mei/mei_dev.h

@@ -89,6 +89,7 @@ enum file_state {
 	MEI_FILE_CONNECTED,
 	MEI_FILE_CONNECTED,
 	MEI_FILE_DISCONNECTING,
 	MEI_FILE_DISCONNECTING,
 	MEI_FILE_DISCONNECT_REPLY,
 	MEI_FILE_DISCONNECT_REPLY,
+	MEI_FILE_DISCONNECT_REQUIRED,
 	MEI_FILE_DISCONNECTED,
 	MEI_FILE_DISCONNECTED,
 };
 };
 
 
@@ -135,6 +136,8 @@ enum mei_wd_states {
  * @MEI_FOP_CONNECT:    connect
  * @MEI_FOP_CONNECT:    connect
  * @MEI_FOP_DISCONNECT: disconnect
  * @MEI_FOP_DISCONNECT: disconnect
  * @MEI_FOP_DISCONNECT_RSP: disconnect response
  * @MEI_FOP_DISCONNECT_RSP: disconnect response
+ * @MEI_FOP_NOTIFY_START:   start notification
+ * @MEI_FOP_NOTIFY_STOP:    stop notification
  */
  */
 enum mei_cb_file_ops {
 enum mei_cb_file_ops {
 	MEI_FOP_READ = 0,
 	MEI_FOP_READ = 0,
@@ -142,6 +145,8 @@ enum mei_cb_file_ops {
 	MEI_FOP_CONNECT,
 	MEI_FOP_CONNECT,
 	MEI_FOP_DISCONNECT,
 	MEI_FOP_DISCONNECT,
 	MEI_FOP_DISCONNECT_RSP,
 	MEI_FOP_DISCONNECT_RSP,
+	MEI_FOP_NOTIFY_START,
+	MEI_FOP_NOTIFY_STOP,
 };
 };
 
 
 /*
 /*
@@ -178,7 +183,7 @@ struct mei_fw_status {
  * @client_id: me client id
  * @client_id: me client id
  * @mei_flow_ctrl_creds: flow control credits
  * @mei_flow_ctrl_creds: flow control credits
  * @connect_count: number connections to this client
  * @connect_count: number connections to this client
- * @reserved: reserved
+ * @bus_added: added to bus
  */
  */
 struct mei_me_client {
 struct mei_me_client {
 	struct list_head list;
 	struct list_head list;
@@ -187,7 +192,7 @@ struct mei_me_client {
 	u8 client_id;
 	u8 client_id;
 	u8 mei_flow_ctrl_creds;
 	u8 mei_flow_ctrl_creds;
 	u8 connect_count;
 	u8 connect_count;
-	u8 reserved;
+	u8 bus_added;
 };
 };
 
 
 
 
@@ -230,18 +235,21 @@ struct mei_cl_cb {
  * @tx_wait: wait queue for tx completion
  * @tx_wait: wait queue for tx completion
  * @rx_wait: wait queue for rx completion
  * @rx_wait: wait queue for rx completion
  * @wait:  wait queue for management operation
  * @wait:  wait queue for management operation
+ * @ev_wait: notification wait queue
+ * @ev_async: event async notification
  * @status: connection status
  * @status: connection status
  * @me_cl: fw client connected
  * @me_cl: fw client connected
  * @host_client_id: host id
  * @host_client_id: host id
  * @mei_flow_ctrl_creds: transmit flow credentials
  * @mei_flow_ctrl_creds: transmit flow credentials
  * @timer_count:  watchdog timer for operation completion
  * @timer_count:  watchdog timer for operation completion
  * @reserved: reserved for alignment
  * @reserved: reserved for alignment
+ * @notify_en: notification - enabled/disabled
+ * @notify_ev: pending notification event
  * @writing_state: state of the tx
  * @writing_state: state of the tx
  * @rd_pending: pending read credits
  * @rd_pending: pending read credits
  * @rd_completed: completed read
  * @rd_completed: completed read
  *
  *
- * @device: device on the mei client bus
- * @device_link:  link to bus clients
+ * @cldev: device on the mei client bus
  */
  */
 struct mei_cl {
 struct mei_cl {
 	struct list_head link;
 	struct list_head link;
@@ -250,19 +258,21 @@ struct mei_cl {
 	wait_queue_head_t tx_wait;
 	wait_queue_head_t tx_wait;
 	wait_queue_head_t rx_wait;
 	wait_queue_head_t rx_wait;
 	wait_queue_head_t wait;
 	wait_queue_head_t wait;
+	wait_queue_head_t ev_wait;
+	struct fasync_struct *ev_async;
 	int status;
 	int status;
 	struct mei_me_client *me_cl;
 	struct mei_me_client *me_cl;
 	u8 host_client_id;
 	u8 host_client_id;
 	u8 mei_flow_ctrl_creds;
 	u8 mei_flow_ctrl_creds;
 	u8 timer_count;
 	u8 timer_count;
 	u8 reserved;
 	u8 reserved;
+	u8 notify_en;
+	u8 notify_ev;
 	enum mei_file_transaction_states writing_state;
 	enum mei_file_transaction_states writing_state;
 	struct list_head rd_pending;
 	struct list_head rd_pending;
 	struct list_head rd_completed;
 	struct list_head rd_completed;
 
 
-	/* MEI CL bus data */
-	struct mei_cl_device *device;
-	struct list_head device_link;
+	struct mei_cl_device *cldev;
 };
 };
 
 
 /** struct mei_hw_ops
 /** struct mei_hw_ops
@@ -329,21 +339,16 @@ struct mei_hw_ops {
 };
 };
 
 
 /* MEI bus API*/
 /* MEI bus API*/
-
-struct mei_cl_device *mei_cl_add_device(struct mei_device *dev,
-					struct mei_me_client *me_cl,
-					struct mei_cl *cl,
-					char *name);
-void mei_cl_remove_device(struct mei_cl_device *device);
-
+void mei_cl_bus_rescan(struct mei_device *bus);
+void mei_cl_dev_fixup(struct mei_cl_device *dev);
 ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
 ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
 			bool blocking);
 			bool blocking);
 ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
 ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
 void mei_cl_bus_rx_event(struct mei_cl *cl);
 void mei_cl_bus_rx_event(struct mei_cl *cl);
-void mei_cl_bus_remove_devices(struct mei_device *dev);
+void mei_cl_bus_notify_event(struct mei_cl *cl);
+void mei_cl_bus_remove_devices(struct mei_device *bus);
 int mei_cl_bus_init(void);
 int mei_cl_bus_init(void);
 void mei_cl_bus_exit(void);
 void mei_cl_bus_exit(void);
-struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev, uuid_le uuid);
 
 
 /**
 /**
  * enum mei_pg_event - power gating transition events
  * enum mei_pg_event - power gating transition events
@@ -416,7 +421,10 @@ const char *mei_pg_state_str(enum mei_pg_state state);
  * @wr_msg      : the buffer for hbm control messages
  * @wr_msg      : the buffer for hbm control messages
  *
  *
  * @version     : HBM protocol version in use
  * @version     : HBM protocol version in use
- * @hbm_f_pg_supported : hbm feature pgi protocol
+ * @hbm_f_pg_supported  : hbm feature pgi protocol
+ * @hbm_f_dc_supported  : hbm feature dynamic clients
+ * @hbm_f_dot_supported : hbm feature disconnect on timeout
+ * @hbm_f_ev_supported  : hbm feature event notification
  *
  *
  * @me_clients_rwsem: rw lock over me_clients list
  * @me_clients_rwsem: rw lock over me_clients list
  * @me_clients  : list of FW clients
  * @me_clients  : list of FW clients
@@ -447,6 +455,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
  * @reset_work  : work item for the device reset
  * @reset_work  : work item for the device reset
  *
  *
  * @device_list : mei client bus list
  * @device_list : mei client bus list
+ * @cl_bus_lock : client bus list lock
  *
  *
  * @dbgfs_dir   : debugfs mei root directory
  * @dbgfs_dir   : debugfs mei root directory
  *
  *
@@ -509,6 +518,9 @@ struct mei_device {
 
 
 	struct hbm_version version;
 	struct hbm_version version;
 	unsigned int hbm_f_pg_supported:1;
 	unsigned int hbm_f_pg_supported:1;
+	unsigned int hbm_f_dc_supported:1;
+	unsigned int hbm_f_dot_supported:1;
+	unsigned int hbm_f_ev_supported:1;
 
 
 	struct rw_semaphore me_clients_rwsem;
 	struct rw_semaphore me_clients_rwsem;
 	struct list_head me_clients;
 	struct list_head me_clients;
@@ -543,6 +555,7 @@ struct mei_device {
 
 
 	/* List of bus devices */
 	/* List of bus devices */
 	struct list_head device_list;
 	struct list_head device_list;
+	struct mutex cl_bus_lock;
 
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 	struct dentry *dbgfs_dir;
 	struct dentry *dbgfs_dir;

+ 0 - 415
drivers/misc/mei/nfc.c

@@ -1,415 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2013, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-
-#include <linux/mei_cl_bus.h>
-
-#include "mei_dev.h"
-#include "client.h"
-
-struct mei_nfc_cmd {
-	u8 command;
-	u8 status;
-	u16 req_id;
-	u32 reserved;
-	u16 data_size;
-	u8 sub_command;
-	u8 data[];
-} __packed;
-
-struct mei_nfc_reply {
-	u8 command;
-	u8 status;
-	u16 req_id;
-	u32 reserved;
-	u16 data_size;
-	u8 sub_command;
-	u8 reply_status;
-	u8 data[];
-} __packed;
-
-struct mei_nfc_if_version {
-	u8 radio_version_sw[3];
-	u8 reserved[3];
-	u8 radio_version_hw[3];
-	u8 i2c_addr;
-	u8 fw_ivn;
-	u8 vendor_id;
-	u8 radio_type;
-} __packed;
-
-struct mei_nfc_connect {
-	u8 fw_ivn;
-	u8 vendor_id;
-} __packed;
-
-struct mei_nfc_connect_resp {
-	u8 fw_ivn;
-	u8 vendor_id;
-	u16 me_major;
-	u16 me_minor;
-	u16 me_hotfix;
-	u16 me_build;
-} __packed;
-
-struct mei_nfc_hci_hdr {
-	u8 cmd;
-	u8 status;
-	u16 req_id;
-	u32 reserved;
-	u16 data_size;
-} __packed;
-
-#define MEI_NFC_CMD_MAINTENANCE 0x00
-#define MEI_NFC_CMD_HCI_SEND 0x01
-#define MEI_NFC_CMD_HCI_RECV 0x02
-
-#define MEI_NFC_SUBCMD_CONNECT    0x00
-#define MEI_NFC_SUBCMD_IF_VERSION 0x01
-
-#define MEI_NFC_HEADER_SIZE 10
-
-/**
- * struct mei_nfc_dev - NFC mei device
- *
- * @me_cl: NFC me client
- * @cl: NFC host client
- * @cl_info: NFC info host client
- * @init_work: perform connection to the info client
- * @fw_ivn: NFC Interface Version Number
- * @vendor_id: NFC manufacturer ID
- * @radio_type: NFC radio type
- * @bus_name: bus name
- *
- */
-struct mei_nfc_dev {
-	struct mei_me_client *me_cl;
-	struct mei_cl *cl;
-	struct mei_cl *cl_info;
-	struct work_struct init_work;
-	u8 fw_ivn;
-	u8 vendor_id;
-	u8 radio_type;
-	char *bus_name;
-};
-
-/* UUIDs for NFC F/W clients */
-const uuid_le mei_nfc_guid = UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50,
-				     0x94, 0xd4, 0x50, 0x26,
-				     0x67, 0x23, 0x77, 0x5c);
-
-static const uuid_le mei_nfc_info_guid = UUID_LE(0xd2de1625, 0x382d, 0x417d,
-					0x48, 0xa4, 0xef, 0xab,
-					0xba, 0x8a, 0x12, 0x06);
-
-/* Vendors */
-#define MEI_NFC_VENDOR_INSIDE 0x00
-#define MEI_NFC_VENDOR_NXP    0x01
-
-/* Radio types */
-#define MEI_NFC_VENDOR_INSIDE_UREAD 0x00
-#define MEI_NFC_VENDOR_NXP_PN544    0x01
-
-static void mei_nfc_free(struct mei_nfc_dev *ndev)
-{
-	if (!ndev)
-		return;
-
-	if (ndev->cl) {
-		list_del(&ndev->cl->device_link);
-		mei_cl_unlink(ndev->cl);
-		kfree(ndev->cl);
-	}
-
-	if (ndev->cl_info) {
-		list_del(&ndev->cl_info->device_link);
-		mei_cl_unlink(ndev->cl_info);
-		kfree(ndev->cl_info);
-	}
-
-	mei_me_cl_put(ndev->me_cl);
-	kfree(ndev);
-}
-
-static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
-{
-	struct mei_device *dev;
-
-	if (!ndev->cl)
-		return -ENODEV;
-
-	dev = ndev->cl->dev;
-
-	switch (ndev->vendor_id) {
-	case MEI_NFC_VENDOR_INSIDE:
-		switch (ndev->radio_type) {
-		case MEI_NFC_VENDOR_INSIDE_UREAD:
-			ndev->bus_name = "microread";
-			return 0;
-
-		default:
-			dev_err(dev->dev, "Unknown radio type 0x%x\n",
-				ndev->radio_type);
-
-			return -EINVAL;
-		}
-
-	case MEI_NFC_VENDOR_NXP:
-		switch (ndev->radio_type) {
-		case MEI_NFC_VENDOR_NXP_PN544:
-			ndev->bus_name = "pn544";
-			return 0;
-		default:
-			dev_err(dev->dev, "Unknown radio type 0x%x\n",
-				ndev->radio_type);
-
-			return -EINVAL;
-		}
-
-	default:
-		dev_err(dev->dev, "Unknown vendor ID 0x%x\n",
-			ndev->vendor_id);
-
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int mei_nfc_if_version(struct mei_nfc_dev *ndev)
-{
-	struct mei_device *dev;
-	struct mei_cl *cl;
-
-	struct mei_nfc_cmd cmd;
-	struct mei_nfc_reply *reply = NULL;
-	struct mei_nfc_if_version *version;
-	size_t if_version_length;
-	int bytes_recv, ret;
-
-	cl = ndev->cl_info;
-	dev = cl->dev;
-
-	memset(&cmd, 0, sizeof(struct mei_nfc_cmd));
-	cmd.command = MEI_NFC_CMD_MAINTENANCE;
-	cmd.data_size = 1;
-	cmd.sub_command = MEI_NFC_SUBCMD_IF_VERSION;
-
-	ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 1);
-	if (ret < 0) {
-		dev_err(dev->dev, "Could not send IF version cmd\n");
-		return ret;
-	}
-
-	/* to be sure on the stack we alloc memory */
-	if_version_length = sizeof(struct mei_nfc_reply) +
-		sizeof(struct mei_nfc_if_version);
-
-	reply = kzalloc(if_version_length, GFP_KERNEL);
-	if (!reply)
-		return -ENOMEM;
-
-	bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
-	if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
-		dev_err(dev->dev, "Could not read IF version\n");
-		ret = -EIO;
-		goto err;
-	}
-
-	version = (struct mei_nfc_if_version *)reply->data;
-
-	ndev->fw_ivn = version->fw_ivn;
-	ndev->vendor_id = version->vendor_id;
-	ndev->radio_type = version->radio_type;
-
-err:
-	kfree(reply);
-	return ret;
-}
-
-static void mei_nfc_init(struct work_struct *work)
-{
-	struct mei_device *dev;
-	struct mei_cl_device *cldev;
-	struct mei_nfc_dev *ndev;
-	struct mei_cl *cl_info;
-	struct mei_me_client *me_cl_info;
-
-	ndev = container_of(work, struct mei_nfc_dev, init_work);
-
-	cl_info = ndev->cl_info;
-	dev = cl_info->dev;
-
-	mutex_lock(&dev->device_lock);
-
-	/* check for valid client id */
-	me_cl_info = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid);
-	if (!me_cl_info) {
-		mutex_unlock(&dev->device_lock);
-		dev_info(dev->dev, "nfc: failed to find the info client\n");
-		goto err;
-	}
-
-	if (mei_cl_connect(cl_info, me_cl_info, NULL) < 0) {
-		mei_me_cl_put(me_cl_info);
-		mutex_unlock(&dev->device_lock);
-		dev_err(dev->dev, "Could not connect to the NFC INFO ME client");
-
-		goto err;
-	}
-	mei_me_cl_put(me_cl_info);
-	mutex_unlock(&dev->device_lock);
-
-	if (mei_nfc_if_version(ndev) < 0) {
-		dev_err(dev->dev, "Could not get the NFC interface version");
-
-		goto err;
-	}
-
-	dev_info(dev->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
-		ndev->fw_ivn, ndev->vendor_id, ndev->radio_type);
-
-	mutex_lock(&dev->device_lock);
-
-	if (mei_cl_disconnect(cl_info) < 0) {
-		mutex_unlock(&dev->device_lock);
-		dev_err(dev->dev, "Could not disconnect the NFC INFO ME client");
-
-		goto err;
-	}
-
-	mutex_unlock(&dev->device_lock);
-
-	if (mei_nfc_build_bus_name(ndev) < 0) {
-		dev_err(dev->dev, "Could not build the bus ID name\n");
-		return;
-	}
-
-	cldev = mei_cl_add_device(dev, ndev->me_cl, ndev->cl,
-				  ndev->bus_name);
-	if (!cldev) {
-		dev_err(dev->dev, "Could not add the NFC device to the MEI bus\n");
-
-		goto err;
-	}
-
-	cldev->priv_data = ndev;
-
-
-	return;
-
-err:
-	mutex_lock(&dev->device_lock);
-	mei_nfc_free(ndev);
-	mutex_unlock(&dev->device_lock);
-
-}
-
-
-int mei_nfc_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
-{
-	struct mei_nfc_dev *ndev;
-	struct mei_cl *cl_info, *cl;
-	int ret;
-
-
-	/* in case of internal reset bail out
-	 * as the device is already setup
-	 */
-	cl = mei_cl_bus_find_cl_by_uuid(dev, mei_nfc_guid);
-	if (cl)
-		return 0;
-
-	ndev = kzalloc(sizeof(struct mei_nfc_dev), GFP_KERNEL);
-	if (!ndev) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	ndev->me_cl = mei_me_cl_get(me_cl);
-	if (!ndev->me_cl) {
-		ret = -ENODEV;
-		goto err;
-	}
-
-	cl_info = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
-	if (IS_ERR(cl_info)) {
-		ret = PTR_ERR(cl_info);
-		goto err;
-	}
-
-	list_add_tail(&cl_info->device_link, &dev->device_list);
-
-	ndev->cl_info = cl_info;
-
-	cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
-	if (IS_ERR(cl)) {
-		ret = PTR_ERR(cl);
-		goto err;
-	}
-
-	list_add_tail(&cl->device_link, &dev->device_list);
-
-	ndev->cl = cl;
-
-	INIT_WORK(&ndev->init_work, mei_nfc_init);
-	schedule_work(&ndev->init_work);
-
-	return 0;
-
-err:
-	mei_nfc_free(ndev);
-
-	return ret;
-}
-
-void mei_nfc_host_exit(struct mei_device *dev)
-{
-	struct mei_nfc_dev *ndev;
-	struct mei_cl *cl;
-	struct mei_cl_device *cldev;
-
-	cl = mei_cl_bus_find_cl_by_uuid(dev, mei_nfc_guid);
-	if (!cl)
-		return;
-
-	cldev = cl->device;
-	if (!cldev)
-		return;
-
-	ndev = (struct mei_nfc_dev *)cldev->priv_data;
-	if (ndev)
-		cancel_work_sync(&ndev->init_work);
-
-	cldev->priv_data = NULL;
-
-	/* Need to remove the device here
-	 * since mei_nfc_free will unlink the clients
-	 */
-	mei_cl_remove_device(cldev);
-
-	mutex_lock(&dev->device_lock);
-	mei_nfc_free(ndev);
-	mutex_unlock(&dev->device_lock);
-}
-
-

+ 15 - 17
drivers/misc/mei/pci-me.c

@@ -82,6 +82,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch8_cfg)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch8_cfg)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch8_cfg)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch8_cfg)},
 
 
+	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)},
+
 	/* required last entry */
 	/* required last entry */
 	{0, }
 	{0, }
 };
 };
@@ -128,6 +133,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
 	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
 	struct mei_device *dev;
 	struct mei_device *dev;
 	struct mei_me_hw *hw;
 	struct mei_me_hw *hw;
+	unsigned int irqflags;
 	int err;
 	int err;
 
 
 
 
@@ -180,17 +186,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_enable_msi(pdev);
 	pci_enable_msi(pdev);
 
 
 	 /* request and enable interrupt */
 	 /* request and enable interrupt */
-	if (pci_dev_msi_enabled(pdev))
-		err = request_threaded_irq(pdev->irq,
-			NULL,
-			mei_me_irq_thread_handler,
-			IRQF_ONESHOT, KBUILD_MODNAME, dev);
-	else
-		err = request_threaded_irq(pdev->irq,
+	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;
+
+	err = request_threaded_irq(pdev->irq,
 			mei_me_irq_quick_handler,
 			mei_me_irq_quick_handler,
 			mei_me_irq_thread_handler,
 			mei_me_irq_thread_handler,
-			IRQF_SHARED, KBUILD_MODNAME, dev);
-
+			irqflags, KBUILD_MODNAME, dev);
 	if (err) {
 	if (err) {
 		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
 		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
 		       pdev->irq);
 		       pdev->irq);
@@ -319,6 +320,7 @@ static int mei_me_pci_resume(struct device *device)
 {
 {
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct mei_device *dev;
 	struct mei_device *dev;
+	unsigned int irqflags;
 	int err;
 	int err;
 
 
 	dev = pci_get_drvdata(pdev);
 	dev = pci_get_drvdata(pdev);
@@ -327,17 +329,13 @@ static int mei_me_pci_resume(struct device *device)
 
 
 	pci_enable_msi(pdev);
 	pci_enable_msi(pdev);
 
 
+	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;
+
 	/* request and enable interrupt */
 	/* request and enable interrupt */
-	if (pci_dev_msi_enabled(pdev))
-		err = request_threaded_irq(pdev->irq,
-			NULL,
-			mei_me_irq_thread_handler,
-			IRQF_ONESHOT, KBUILD_MODNAME, dev);
-	else
-		err = request_threaded_irq(pdev->irq,
+	err = request_threaded_irq(pdev->irq,
 			mei_me_irq_quick_handler,
 			mei_me_irq_quick_handler,
 			mei_me_irq_thread_handler,
 			mei_me_irq_thread_handler,
-			IRQF_SHARED, KBUILD_MODNAME, dev);
+			irqflags, KBUILD_MODNAME, dev);
 
 
 	if (err) {
 	if (err) {
 		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
 		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",

+ 152 - 0
drivers/misc/qcom-coincell.c

@@ -0,0 +1,152 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+struct qcom_coincell {
+	struct device	*dev;
+	struct regmap	*regmap;
+	u32		base_addr;
+};
+
+#define QCOM_COINCELL_REG_RSET		0x44
+#define QCOM_COINCELL_REG_VSET		0x45
+#define QCOM_COINCELL_REG_ENABLE	0x46
+
+#define QCOM_COINCELL_ENABLE		BIT(7)
+
+static const int qcom_rset_map[] = { 2100, 1700, 1200, 800 };
+static const int qcom_vset_map[] = { 2500, 3200, 3100, 3000 };
+/* NOTE: for pm8921 and others, voltage of 2500 is 16 (10000b), not 0 */
+
+/* if enable==0, rset and vset are ignored */
+static int qcom_coincell_chgr_config(struct qcom_coincell *chgr, int rset,
+				     int vset, bool enable)
+{
+	int i, j, rc;
+
+	/* if disabling, just do that and skip other operations */
+	if (!enable)
+		return regmap_write(chgr->regmap,
+			  chgr->base_addr + QCOM_COINCELL_REG_ENABLE, 0);
+
+	/* find index for current-limiting resistor */
+	for (i = 0; i < ARRAY_SIZE(qcom_rset_map); i++)
+		if (rset == qcom_rset_map[i])
+			break;
+
+	if (i >= ARRAY_SIZE(qcom_rset_map)) {
+		dev_err(chgr->dev, "invalid rset-ohms value %d\n", rset);
+		return -EINVAL;
+	}
+
+	/* find index for charge voltage */
+	for (j = 0; j < ARRAY_SIZE(qcom_vset_map); j++)
+		if (vset == qcom_vset_map[j])
+			break;
+
+	if (j >= ARRAY_SIZE(qcom_vset_map)) {
+		dev_err(chgr->dev, "invalid vset-millivolts value %d\n", vset);
+		return -EINVAL;
+	}
+
+	rc = regmap_write(chgr->regmap,
+			  chgr->base_addr + QCOM_COINCELL_REG_RSET, i);
+	if (rc) {
+		/*
+		 * This is mainly to flag a bad base_addr (reg) from dts.
+		 * Other failures writing to the registers should be
+		 * extremely rare, or indicative of problems that
+		 * should be reported elsewhere (eg. spmi failure).
+		 */
+		dev_err(chgr->dev, "could not write to RSET register\n");
+		return rc;
+	}
+
+	rc = regmap_write(chgr->regmap,
+		chgr->base_addr + QCOM_COINCELL_REG_VSET, j);
+	if (rc)
+		return rc;
+
+	/* set 'enable' register */
+	return regmap_write(chgr->regmap,
+			    chgr->base_addr + QCOM_COINCELL_REG_ENABLE,
+			    QCOM_COINCELL_ENABLE);
+}
+
+static int qcom_coincell_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct qcom_coincell chgr;
+	u32 rset, vset;
+	bool enable;
+	int rc;
+
+	chgr.dev = &pdev->dev;
+
+	chgr.regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!chgr.regmap) {
+		dev_err(chgr.dev, "Unable to get regmap\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(node, "reg", &chgr.base_addr);
+	if (rc)
+		return rc;
+
+	enable = !of_property_read_bool(node, "qcom,charger-disable");
+
+	if (enable) {
+		rc = of_property_read_u32(node, "qcom,rset-ohms", &rset);
+		if (rc) {
+			dev_err(chgr.dev,
+				"can't find 'qcom,rset-ohms' in DT block");
+			return rc;
+		}
+
+		rc = of_property_read_u32(node, "qcom,vset-millivolts", &vset);
+		if (rc) {
+			dev_err(chgr.dev,
+			    "can't find 'qcom,vset-millivolts' in DT block");
+			return rc;
+		}
+	}
+
+	return qcom_coincell_chgr_config(&chgr, rset, vset, enable);
+}
+
+static const struct of_device_id qcom_coincell_match_table[] = {
+	{ .compatible = "qcom,pm8941-coincell", },
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, qcom_coincell_match_table);
+
+static struct platform_driver qcom_coincell_driver = {
+	.driver	= {
+		.name		= "qcom-spmi-coincell",
+		.of_match_table	= qcom_coincell_match_table,
+	},
+	.probe		= qcom_coincell_probe,
+};
+
+module_platform_driver(qcom_coincell_driver);
+
+MODULE_DESCRIPTION("Qualcomm PMIC coincell charger driver");
+MODULE_LICENSE("GPL v2");

+ 12 - 93
drivers/misc/ti-st/st_kim.c

@@ -36,8 +36,6 @@
 #include <linux/skbuff.h>
 #include <linux/skbuff.h>
 #include <linux/ti_wilink_st.h>
 #include <linux/ti_wilink_st.h>
 #include <linux/module.h>
 #include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
 
 
 #define MAX_ST_DEVICES	3	/* Imagine 1 on each UART for now */
 #define MAX_ST_DEVICES	3	/* Imagine 1 on each UART for now */
 static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
 static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
@@ -45,9 +43,6 @@ static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
 /**********************************************************************/
 /**********************************************************************/
 /* internal functions */
 /* internal functions */
 
 
-struct ti_st_plat_data	*dt_pdata;
-static struct ti_st_plat_data *get_platform_data(struct device *dev);
-
 /**
 /**
  * st_get_plat_device -
  * st_get_plat_device -
  *	function which returns the reference to the platform device
  *	function which returns the reference to the platform device
@@ -469,12 +464,7 @@ long st_kim_start(void *kim_data)
 	struct kim_data_s	*kim_gdata = (struct kim_data_s *)kim_data;
 	struct kim_data_s	*kim_gdata = (struct kim_data_s *)kim_data;
 
 
 	pr_info(" %s", __func__);
 	pr_info(" %s", __func__);
-	if (kim_gdata->kim_pdev->dev.of_node) {
-		pr_debug("use device tree data");
-		pdata = dt_pdata;
-	} else {
-		pdata = kim_gdata->kim_pdev->dev.platform_data;
-	}
+	pdata = kim_gdata->kim_pdev->dev.platform_data;
 
 
 	do {
 	do {
 		/* platform specific enabling code here */
 		/* platform specific enabling code here */
@@ -482,9 +472,9 @@ long st_kim_start(void *kim_data)
 			pdata->chip_enable(kim_gdata);
 			pdata->chip_enable(kim_gdata);
 
 
 		/* Configure BT nShutdown to HIGH state */
 		/* Configure BT nShutdown to HIGH state */
-		gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+		gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW);
 		mdelay(5);	/* FIXME: a proper toggle */
 		mdelay(5);	/* FIXME: a proper toggle */
-		gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+		gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH);
 		mdelay(100);
 		mdelay(100);
 		/* re-initialize the completion */
 		/* re-initialize the completion */
 		reinit_completion(&kim_gdata->ldisc_installed);
 		reinit_completion(&kim_gdata->ldisc_installed);
@@ -534,18 +524,12 @@ long st_kim_stop(void *kim_data)
 {
 {
 	long err = 0;
 	long err = 0;
 	struct kim_data_s	*kim_gdata = (struct kim_data_s *)kim_data;
 	struct kim_data_s	*kim_gdata = (struct kim_data_s *)kim_data;
-	struct ti_st_plat_data	*pdata;
+	struct ti_st_plat_data	*pdata =
+		kim_gdata->kim_pdev->dev.platform_data;
 	struct tty_struct	*tty = kim_gdata->core_data->tty;
 	struct tty_struct	*tty = kim_gdata->core_data->tty;
 
 
 	reinit_completion(&kim_gdata->ldisc_installed);
 	reinit_completion(&kim_gdata->ldisc_installed);
 
 
-	if (kim_gdata->kim_pdev->dev.of_node) {
-		pr_debug("use device tree data");
-		pdata = dt_pdata;
-	} else
-		pdata = kim_gdata->kim_pdev->dev.platform_data;
-
-
 	if (tty) {	/* can be called before ldisc is installed */
 	if (tty) {	/* can be called before ldisc is installed */
 		/* Flush any pending characters in the driver and discipline. */
 		/* Flush any pending characters in the driver and discipline. */
 		tty_ldisc_flush(tty);
 		tty_ldisc_flush(tty);
@@ -566,11 +550,11 @@ long st_kim_stop(void *kim_data)
 	}
 	}
 
 
 	/* By default configure BT nShutdown to LOW state */
 	/* By default configure BT nShutdown to LOW state */
-	gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+	gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW);
 	mdelay(1);
 	mdelay(1);
-	gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+	gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH);
 	mdelay(1);
 	mdelay(1);
-	gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+	gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW);
 
 
 	/* platform specific disable */
 	/* platform specific disable */
 	if (pdata->chip_disable)
 	if (pdata->chip_disable)
@@ -737,52 +721,13 @@ static const struct file_operations list_debugfs_fops = {
  * board-*.c file
  * board-*.c file
  */
  */
 
 
-static const struct of_device_id kim_of_match[] = {
-{
-	.compatible = "kim",
-	},
-	{}
-};
-MODULE_DEVICE_TABLE(of, kim_of_match);
-
-static struct ti_st_plat_data *get_platform_data(struct device *dev)
-{
-	struct device_node *np = dev->of_node;
-	const u32 *dt_property;
-	int len;
-
-	dt_pdata = kzalloc(sizeof(*dt_pdata), GFP_KERNEL);
-	if (!dt_pdata)
-		return NULL;
-
-	dt_property = of_get_property(np, "dev_name", &len);
-	if (dt_property)
-		memcpy(&dt_pdata->dev_name, dt_property, len);
-	of_property_read_u32(np, "nshutdown_gpio",
-			     &dt_pdata->nshutdown_gpio);
-	of_property_read_u32(np, "flow_cntrl", &dt_pdata->flow_cntrl);
-	of_property_read_u32(np, "baud_rate", &dt_pdata->baud_rate);
-
-	return dt_pdata;
-}
-
 static struct dentry *kim_debugfs_dir;
 static struct dentry *kim_debugfs_dir;
 static int kim_probe(struct platform_device *pdev)
 static int kim_probe(struct platform_device *pdev)
 {
 {
 	struct kim_data_s	*kim_gdata;
 	struct kim_data_s	*kim_gdata;
-	struct ti_st_plat_data	*pdata;
+	struct ti_st_plat_data	*pdata = pdev->dev.platform_data;
 	int err;
 	int err;
 
 
-	if (pdev->dev.of_node)
-		pdata = get_platform_data(&pdev->dev);
-	else
-		pdata = pdev->dev.platform_data;
-
-	if (pdata == NULL) {
-		dev_err(&pdev->dev, "Platform Data is missing\n");
-		return -ENXIO;
-	}
-
 	if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) {
 	if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) {
 		/* multiple devices could exist */
 		/* multiple devices could exist */
 		st_kim_devices[pdev->id] = pdev;
 		st_kim_devices[pdev->id] = pdev;
@@ -863,16 +808,9 @@ err_core_init:
 static int kim_remove(struct platform_device *pdev)
 static int kim_remove(struct platform_device *pdev)
 {
 {
 	/* free the GPIOs requested */
 	/* free the GPIOs requested */
-	struct ti_st_plat_data	*pdata;
+	struct ti_st_plat_data	*pdata = pdev->dev.platform_data;
 	struct kim_data_s	*kim_gdata;
 	struct kim_data_s	*kim_gdata;
 
 
-	if (pdev->dev.of_node) {
-		pr_debug("use device tree data");
-		pdata = dt_pdata;
-	} else {
-		pdata = pdev->dev.platform_data;
-	}
-
 	kim_gdata = platform_get_drvdata(pdev);
 	kim_gdata = platform_get_drvdata(pdev);
 
 
 	/* Free the Bluetooth/FM/GPIO
 	/* Free the Bluetooth/FM/GPIO
@@ -890,22 +828,12 @@ static int kim_remove(struct platform_device *pdev)
 
 
 	kfree(kim_gdata);
 	kfree(kim_gdata);
 	kim_gdata = NULL;
 	kim_gdata = NULL;
-	kfree(dt_pdata);
-	dt_pdata = NULL;
-
 	return 0;
 	return 0;
 }
 }
 
 
 static int kim_suspend(struct platform_device *pdev, pm_message_t state)
 static int kim_suspend(struct platform_device *pdev, pm_message_t state)
 {
 {
-	struct ti_st_plat_data	*pdata;
-
-	if (pdev->dev.of_node) {
-		pr_debug("use device tree data");
-		pdata = dt_pdata;
-	} else {
-		pdata = pdev->dev.platform_data;
-	}
+	struct ti_st_plat_data	*pdata = pdev->dev.platform_data;
 
 
 	if (pdata->suspend)
 	if (pdata->suspend)
 		return pdata->suspend(pdev, state);
 		return pdata->suspend(pdev, state);
@@ -915,14 +843,7 @@ static int kim_suspend(struct platform_device *pdev, pm_message_t state)
 
 
 static int kim_resume(struct platform_device *pdev)
 static int kim_resume(struct platform_device *pdev)
 {
 {
-	struct ti_st_plat_data	*pdata;
-
-	if (pdev->dev.of_node) {
-		pr_debug("use device tree data");
-		pdata = dt_pdata;
-	} else {
-		pdata = pdev->dev.platform_data;
-	}
+	struct ti_st_plat_data	*pdata = pdev->dev.platform_data;
 
 
 	if (pdata->resume)
 	if (pdata->resume)
 		return pdata->resume(pdev);
 		return pdata->resume(pdev);
@@ -939,8 +860,6 @@ static struct platform_driver kim_platform_driver = {
 	.resume = kim_resume,
 	.resume = kim_resume,
 	.driver = {
 	.driver = {
 		.name = "kim",
 		.name = "kim",
-		.owner = THIS_MODULE,
-		.of_match_table = of_match_ptr(kim_of_match),
 	},
 	},
 };
 };
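With the device-tree path removed, the WiLink configuration has to come back in through classic platform data registered by board code. A rough board-file sketch under that assumption follows; the field names are taken from <linux/ti_wilink_st.h> as best recalled here and should be checked against the header, and the GPIO number and UART name are placeholders.

#include <linux/platform_device.h>
#include <linux/ti_wilink_st.h>

static struct ti_st_plat_data wilink_pdata = {
	.nshutdown_gpio	= 137,		/* BT_EN GPIO, board specific */
	.dev_name	= "/dev/ttyO1",	/* UART the WiLink chip hangs off */
	.flow_cntrl	= 1,		/* CTS/RTS flow control enabled */
	.baud_rate	= 3000000,
};

static struct platform_device wilink_device = {
	.name			= "kim",	/* matches kim_platform_driver */
	.id			= -1,
	.dev.platform_data	= &wilink_pdata,
};

/* board init code would call platform_device_register(&wilink_device); */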
 
 

+ 2 - 15
drivers/misc/ti-st/st_ll.c

@@ -26,7 +26,6 @@
 #include <linux/ti_wilink_st.h>
 #include <linux/ti_wilink_st.h>
 
 
 /**********************************************************************/
 /**********************************************************************/
-
 /* internal functions */
 /* internal functions */
 static void send_ll_cmd(struct st_data_s *st_data,
 static void send_ll_cmd(struct st_data_s *st_data,
 	unsigned char cmd)
 	unsigned char cmd)
@@ -54,13 +53,7 @@ static void ll_device_want_to_sleep(struct st_data_s *st_data)
 
 
 	/* communicate to platform about chip asleep */
 	/* communicate to platform about chip asleep */
 	kim_data = st_data->kim_data;
 	kim_data = st_data->kim_data;
-	if (kim_data->kim_pdev->dev.of_node) {
-		pr_debug("use device tree data");
-		pdata = dt_pdata;
-	} else {
-		pdata = kim_data->kim_pdev->dev.platform_data;
-	}
-
+	pdata = kim_data->kim_pdev->dev.platform_data;
 	if (pdata->chip_asleep)
 	if (pdata->chip_asleep)
 		pdata->chip_asleep(NULL);
 		pdata->chip_asleep(NULL);
 }
 }
@@ -93,13 +86,7 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data)
 
 
 	/* communicate to platform about chip wakeup */
 	/* communicate to platform about chip wakeup */
 	kim_data = st_data->kim_data;
 	kim_data = st_data->kim_data;
-	if (kim_data->kim_pdev->dev.of_node) {
-		pr_debug("use device tree data");
-		pdata = dt_pdata;
-	} else {
-		pdata = kim_data->kim_pdev->dev.platform_data;
-	}
-
+	pdata = kim_data->kim_pdev->dev.platform_data;
 	if (pdata->chip_awake)
 	if (pdata->chip_awake)
 		pdata->chip_awake(NULL);
 		pdata->chip_awake(NULL);
 }
 }

+ 0 - 1
drivers/misc/tsl2550.c

@@ -446,7 +446,6 @@ MODULE_DEVICE_TABLE(i2c, tsl2550_id);
 static struct i2c_driver tsl2550_driver = {
 	.driver = {
 		.name	= TSL2550_DRV_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= TSL2550_PM_OPS,
 	},
 	.probe	= tsl2550_probe,

+ 85 - 85
drivers/misc/vmw_balloon.c

@@ -46,7 +46,7 @@
 
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
 MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.2.1.3-k");
+MODULE_VERSION("1.3.0.0-k");
 MODULE_ALIAS("dmi:*:svnVMware*:*");
 MODULE_ALIAS("dmi:*:svnVMware*:*");
 MODULE_ALIAS("vmware_vmmemctl");
 MODULE_ALIAS("vmware_vmmemctl");
 MODULE_LICENSE("GPL");
 MODULE_LICENSE("GPL");
@@ -110,9 +110,18 @@ MODULE_LICENSE("GPL");
  */
  */
 #define VMW_BALLOON_HV_PORT		0x5670
 #define VMW_BALLOON_HV_PORT		0x5670
 #define VMW_BALLOON_HV_MAGIC		0x456c6d6f
 #define VMW_BALLOON_HV_MAGIC		0x456c6d6f
-#define VMW_BALLOON_PROTOCOL_VERSION	2
 #define VMW_BALLOON_GUEST_ID		1	/* Linux */
 #define VMW_BALLOON_GUEST_ID		1	/* Linux */
 
 
+enum vmwballoon_capabilities {
+	/*
+	 * Bit 0 is reserved and not associated to any capability.
+	 */
+	VMW_BALLOON_BASIC_CMDS		= (1 << 1),
+	VMW_BALLOON_BATCHED_CMDS	= (1 << 2)
+};
+
+#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_BASIC_CMDS)
+
 #define VMW_BALLOON_CMD_START		0
 #define VMW_BALLOON_CMD_START		0
 #define VMW_BALLOON_CMD_GET_TARGET	1
 #define VMW_BALLOON_CMD_GET_TARGET	1
 #define VMW_BALLOON_CMD_LOCK		2
 #define VMW_BALLOON_CMD_LOCK		2
@@ -120,32 +129,36 @@ MODULE_LICENSE("GPL");
 #define VMW_BALLOON_CMD_GUEST_ID	4
 #define VMW_BALLOON_CMD_GUEST_ID	4
 
 
 /* error codes */
 /* error codes */
-#define VMW_BALLOON_SUCCESS		0
-#define VMW_BALLOON_FAILURE		-1
-#define VMW_BALLOON_ERROR_CMD_INVALID	1
-#define VMW_BALLOON_ERROR_PPN_INVALID	2
-#define VMW_BALLOON_ERROR_PPN_LOCKED	3
-#define VMW_BALLOON_ERROR_PPN_UNLOCKED	4
-#define VMW_BALLOON_ERROR_PPN_PINNED	5
-#define VMW_BALLOON_ERROR_PPN_NOTNEEDED	6
-#define VMW_BALLOON_ERROR_RESET		7
-#define VMW_BALLOON_ERROR_BUSY		8
-
-#define VMWARE_BALLOON_CMD(cmd, data, result)		\
-({							\
-	unsigned long __stat, __dummy1, __dummy2;	\
-	__asm__ __volatile__ ("inl %%dx" :		\
-		"=a"(__stat),				\
-		"=c"(__dummy1),				\
-		"=d"(__dummy2),				\
-		"=b"(result) :				\
-		"0"(VMW_BALLOON_HV_MAGIC),		\
-		"1"(VMW_BALLOON_CMD_##cmd),		\
-		"2"(VMW_BALLOON_HV_PORT),		\
-		"3"(data) :				\
-		"memory");				\
-	result &= -1UL;					\
-	__stat & -1UL;					\
+#define VMW_BALLOON_SUCCESS		        0
+#define VMW_BALLOON_FAILURE		        -1
+#define VMW_BALLOON_ERROR_CMD_INVALID	        1
+#define VMW_BALLOON_ERROR_PPN_INVALID	        2
+#define VMW_BALLOON_ERROR_PPN_LOCKED	        3
+#define VMW_BALLOON_ERROR_PPN_UNLOCKED	        4
+#define VMW_BALLOON_ERROR_PPN_PINNED	        5
+#define VMW_BALLOON_ERROR_PPN_NOTNEEDED	        6
+#define VMW_BALLOON_ERROR_RESET		        7
+#define VMW_BALLOON_ERROR_BUSY		        8
+
+#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)
+
+#define VMWARE_BALLOON_CMD(cmd, data, result)			\
+({								\
+	unsigned long __status, __dummy1, __dummy2;		\
+	__asm__ __volatile__ ("inl %%dx" :			\
+		"=a"(__status),					\
+		"=c"(__dummy1),					\
+		"=d"(__dummy2),					\
+		"=b"(result) :					\
+		"0"(VMW_BALLOON_HV_MAGIC),			\
+		"1"(VMW_BALLOON_CMD_##cmd),			\
+		"2"(VMW_BALLOON_HV_PORT),			\
+		"3"(data) :					\
+		"memory");					\
+	if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START)	\
+		result = __dummy1;				\
+	result &= -1UL;						\
+	__status & -1UL;					\
 })
 })
 
 
 #ifdef CONFIG_DEBUG_FS
 #ifdef CONFIG_DEBUG_FS
@@ -223,11 +236,12 @@ static struct vmballoon balloon;
  */
  */
 static bool vmballoon_send_start(struct vmballoon *b)
 static bool vmballoon_send_start(struct vmballoon *b)
 {
 {
-	unsigned long status, dummy;
+	unsigned long status, capabilities;
 
 
 	STATS_INC(b->stats.start);
 	STATS_INC(b->stats.start);
 
 
-	status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy);
+	status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_CAPABILITIES,
+				capabilities);
 	if (status == VMW_BALLOON_SUCCESS)
 	if (status == VMW_BALLOON_SUCCESS)
 		return true;
 		return true;
 
 
@@ -402,55 +416,37 @@ static void vmballoon_reset(struct vmballoon *b)
 }
 }
 
 
 /*
 /*
- * Allocate (or reserve) a page for the balloon and notify the host.  If host
- * refuses the page put it on "refuse" list and allocate another one until host
- * is satisfied. "Refused" pages are released at the end of inflation cycle
- * (when we allocate b->rate_alloc pages).
+ * Notify the host of a ballooned page. If host rejects the page put it on the
+ * refuse list, those refused page are then released at the end of the
+ * inflation cycle.
  */
  */
-static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
+static int vmballoon_lock_page(struct vmballoon *b, struct page *page)
 {
 {
-	struct page *page;
-	gfp_t flags;
-	unsigned int hv_status;
-	int locked;
-	flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
-
-	do {
-		if (!can_sleep)
-			STATS_INC(b->stats.alloc);
-		else
-			STATS_INC(b->stats.sleep_alloc);
+	int locked, hv_status;
 
 
-		page = alloc_page(flags);
-		if (!page) {
-			if (!can_sleep)
-				STATS_INC(b->stats.alloc_fail);
-			else
-				STATS_INC(b->stats.sleep_alloc_fail);
-			return -ENOMEM;
-		}
+	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
+	if (locked > 0) {
+		STATS_INC(b->stats.refused_alloc);
 
 
-		/* inform monitor */
-		locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
-		if (locked > 0) {
-			STATS_INC(b->stats.refused_alloc);
-
-			if (hv_status == VMW_BALLOON_ERROR_RESET ||
-			    hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
-				__free_page(page);
-				return -EIO;
-			}
+		if (hv_status == VMW_BALLOON_ERROR_RESET ||
+				hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
+			__free_page(page);
+			return -EIO;
+		}
 
 
-			/*
-			 * Place page on the list of non-balloonable pages
-			 * and retry allocation, unless we already accumulated
-			 * too many of them, in which case take a breather.
-			 */
+		/*
+		 * Place page on the list of non-balloonable pages
+		 * and retry allocation, unless we already accumulated
+		 * too many of them, in which case take a breather.
+		 */
+		if (b->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
+			b->n_refused_pages++;
 			list_add(&page->lru, &b->refused_pages);
-			if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
-				return -EIO;
+		} else {
+			__free_page(page);
 		}
-	} while (locked != 0);
+		return -EIO;
+	}
 
 
 	/* track allocated page */
 	list_add(&page->lru, &b->pages);
@@ -512,7 +508,7 @@ static void vmballoon_inflate(struct vmballoon *b)
 	unsigned int i;
 	unsigned int allocations = 0;
 	int error = 0;
-	bool alloc_can_sleep = false;
+	gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
 
 
 	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
 
 
@@ -543,19 +539,16 @@ static void vmballoon_inflate(struct vmballoon *b)
 		 __func__, goal, rate, b->rate_alloc);
 
 	for (i = 0; i < goal; i++) {
+		struct page *page;
 
 
-		error = vmballoon_reserve_page(b, alloc_can_sleep);
-		if (error) {
-			if (error != -ENOMEM) {
-				/*
-				 * Not a page allocation failure, stop this
-				 * cycle. Maybe we'll get new target from
-				 * the host soon.
-				 */
-				break;
-			}
+		if (flags == VMW_PAGE_ALLOC_NOSLEEP)
+			STATS_INC(b->stats.alloc);
+		else
+			STATS_INC(b->stats.sleep_alloc);
 
 
-			if (alloc_can_sleep) {
+		page = alloc_page(flags);
+		if (!page) {
+			if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
 				/*
 				 * CANSLEEP page allocation failed, so guest
 				 * is under severe memory pressure. Quickly
@@ -563,8 +556,10 @@ static void vmballoon_inflate(struct vmballoon *b)
 				 */
 				b->rate_alloc = max(b->rate_alloc / 2,
 						    VMW_BALLOON_RATE_ALLOC_MIN);
+				STATS_INC(b->stats.sleep_alloc_fail);
 				break;
 			}
+			STATS_INC(b->stats.alloc_fail);
 
 
 			/*
 			 * NOSLEEP page allocation failed, so the guest is
@@ -579,11 +574,16 @@ static void vmballoon_inflate(struct vmballoon *b)
 			if (i >= b->rate_alloc)
 				break;
 
 
-			alloc_can_sleep = true;
+			flags = VMW_PAGE_ALLOC_CANSLEEP;
 			/* Lower rate for sleeping allocations. */
 			rate = b->rate_alloc;
+			continue;
 		}
 
 
+		error = vmballoon_lock_page(b, page);
+		if (error)
+			break;
+
 		if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) {
 			cond_resched();
 			allocations = 0;

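Note on the hunk above: the reworked VMWARE_BALLOON_CMD macro returns the hypervisor status from %eax and, for the START command only, copies the capability bits handed back in %ecx into the result argument. A minimal, hedged sketch (not part of the patch) of how a caller might consume that; treating the new VMW_BALLOON_SUCCESS_WITH_CAPABILITIES status as success is an assumption here, while the hunk shown above only checks VMW_BALLOON_SUCCESS:

/* Hedged sketch, not from the patch: negotiate capabilities at start. */
static bool example_send_start(void)
{
	unsigned long status, capabilities;

	status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_CAPABILITIES,
				    capabilities);

	/* For START, "capabilities" now holds the bits the host returned in %ecx. */
	return status == VMW_BALLOON_SUCCESS ||
	       status == VMW_BALLOON_SUCCESS_WITH_CAPABILITIES;
}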
+ 1 - 6
drivers/misc/vmw_vmci/vmci_host.c

@@ -1031,14 +1031,9 @@ int __init vmci_host_init(void)
 
 
 void __exit vmci_host_exit(void)
 {
-	int error;
-
 	vmci_host_device_initialized = false;
 
 
-	error = misc_deregister(&vmci_host_miscdev);
-	if (error)
-		pr_warn("Error unregistering character device: %d\n", error);
-
+	misc_deregister(&vmci_host_miscdev);
 	vmci_ctx_destroy(host_context);
 	vmci_qp_broker_exit();
 
 

+ 2 - 1
drivers/nfc/mei_phy.c

@@ -355,7 +355,8 @@ static int nfc_mei_phy_enable(void *phy_id)
 		goto err;
 	}
 
 
-	r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy);
+	r = mei_cl_register_event_cb(phy->device, BIT(MEI_CL_EVENT_RX),
+				     nfc_mei_event_cb, phy);
 	if (r) {
 		pr_err("Event cb registration failed %d\n", r);
 		goto err;

+ 39 - 0
drivers/nvmem/Kconfig

@@ -0,0 +1,39 @@
+menuconfig NVMEM
+	tristate "NVMEM Support"
+	select REGMAP
+	help
+	  Support for NVMEM (Non-Volatile Memory) devices like EEPROMs, eFuses...
+
+	  This framework is designed to provide a generic interface to NVMEM
+	  from both the Linux kernel and userspace.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem_core.
+
+	  If unsure, say no.
+
+if NVMEM
+
+config QCOM_QFPROM
+	tristate "QCOM QFPROM Support"
+	depends on ARCH_QCOM || COMPILE_TEST
+	select REGMAP_MMIO
+	help
+	  Say y here to enable QFPROM support. The QFPROM provides access
+	  functions for QFPROM data to the rest of the drivers via the nvmem
+	  interface.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem_qfprom.
+
+config NVMEM_SUNXI_SID
+	tristate "Allwinner SoCs SID support"
+	depends on ARCH_SUNXI
+	select REGMAP_MMIO
+	help
+	  This is a driver for the 'security ID' available on various Allwinner
+	  devices.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem_sunxi_sid.
+
+endif

+ 12 - 0
drivers/nvmem/Makefile

@@ -0,0 +1,12 @@
+#
+# Makefile for nvmem drivers.
+#
+
+obj-$(CONFIG_NVMEM)		+= nvmem_core.o
+nvmem_core-y			:= core.o
+
+# Devices
+obj-$(CONFIG_QCOM_QFPROM)	+= nvmem_qfprom.o
+nvmem_qfprom-y			:= qfprom.o
+obj-$(CONFIG_NVMEM_SUNXI_SID)	+= nvmem_sunxi_sid.o
+nvmem_sunxi_sid-y		:= sunxi_sid.o

+ 1083 - 0
drivers/nvmem/core.c

@@ -0,0 +1,1083 @@
+/*
+ * nvmem framework core.
+ *
+ * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+struct nvmem_device {
+	const char		*name;
+	struct regmap		*regmap;
+	struct module		*owner;
+	struct device		dev;
+	int			stride;
+	int			word_size;
+	int			ncells;
+	int			id;
+	int			users;
+	size_t			size;
+	bool			read_only;
+};
+
+struct nvmem_cell {
+	const char		*name;
+	int			offset;
+	int			bytes;
+	int			bit_offset;
+	int			nbits;
+	struct nvmem_device	*nvmem;
+	struct list_head	node;
+};
+
+static DEFINE_MUTEX(nvmem_mutex);
+static DEFINE_IDA(nvmem_ida);
+
+static LIST_HEAD(nvmem_cells);
+static DEFINE_MUTEX(nvmem_cells_mutex);
+
+#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
+
+static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
+				    struct bin_attribute *attr,
+				    char *buf, loff_t pos, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct nvmem_device *nvmem = to_nvmem_device(dev);
+	int rc;
+
+	/* Stop the user from reading */
+	if (pos > nvmem->size)
+		return 0;
+
+	if (pos + count > nvmem->size)
+		count = nvmem->size - pos;
+
+	count = round_down(count, nvmem->word_size);
+
+	rc = regmap_raw_read(nvmem->regmap, pos, buf, count);
+
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	return count;
+}
+
+static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
+				     struct bin_attribute *attr,
+				     char *buf, loff_t pos, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct nvmem_device *nvmem = to_nvmem_device(dev);
+	int rc;
+
+	/* Stop the user from writing */
+	if (pos > nvmem->size)
+		return 0;
+
+	if (pos + count > nvmem->size)
+		count = nvmem->size - pos;
+
+	count = round_down(count, nvmem->word_size);
+
+	rc = regmap_raw_write(nvmem->regmap, pos, buf, count);
+
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	return count;
+}
+
+/* default read/write permissions */
+static struct bin_attribute bin_attr_rw_nvmem = {
+	.attr	= {
+		.name	= "nvmem",
+		.mode	= S_IWUSR | S_IRUGO,
+	},
+	.read	= bin_attr_nvmem_read,
+	.write	= bin_attr_nvmem_write,
+};
+
+static struct bin_attribute *nvmem_bin_rw_attributes[] = {
+	&bin_attr_rw_nvmem,
+	NULL,
+};
+
+static const struct attribute_group nvmem_bin_rw_group = {
+	.bin_attrs	= nvmem_bin_rw_attributes,
+};
+
+static const struct attribute_group *nvmem_rw_dev_groups[] = {
+	&nvmem_bin_rw_group,
+	NULL,
+};
+
+/* read only permission */
+static struct bin_attribute bin_attr_ro_nvmem = {
+	.attr	= {
+		.name	= "nvmem",
+		.mode	= S_IRUGO,
+	},
+	.read	= bin_attr_nvmem_read,
+};
+
+static struct bin_attribute *nvmem_bin_ro_attributes[] = {
+	&bin_attr_ro_nvmem,
+	NULL,
+};
+
+static const struct attribute_group nvmem_bin_ro_group = {
+	.bin_attrs	= nvmem_bin_ro_attributes,
+};
+
+static const struct attribute_group *nvmem_ro_dev_groups[] = {
+	&nvmem_bin_ro_group,
+	NULL,
+};
+
+static void nvmem_release(struct device *dev)
+{
+	struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+	ida_simple_remove(&nvmem_ida, nvmem->id);
+	kfree(nvmem);
+}
+
+static const struct device_type nvmem_provider_type = {
+	.release	= nvmem_release,
+};
+
+static struct bus_type nvmem_bus_type = {
+	.name		= "nvmem",
+};
+
+static int of_nvmem_match(struct device *dev, void *nvmem_np)
+{
+	return dev->of_node == nvmem_np;
+}
+
+static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
+{
+	struct device *d;
+
+	if (!nvmem_np)
+		return NULL;
+
+	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);
+
+	if (!d)
+		return NULL;
+
+	return to_nvmem_device(d);
+}
+
+static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
+{
+	struct nvmem_cell *p;
+
+	list_for_each_entry(p, &nvmem_cells, node)
+		if (p && !strcmp(p->name, cell_id))
+			return p;
+
+	return NULL;
+}
+
+static void nvmem_cell_drop(struct nvmem_cell *cell)
+{
+	mutex_lock(&nvmem_cells_mutex);
+	list_del(&cell->node);
+	mutex_unlock(&nvmem_cells_mutex);
+	kfree(cell);
+}
+
+static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
+{
+	struct nvmem_cell *cell;
+	struct list_head *p, *n;
+
+	list_for_each_safe(p, n, &nvmem_cells) {
+		cell = list_entry(p, struct nvmem_cell, node);
+		if (cell->nvmem == nvmem)
+			nvmem_cell_drop(cell);
+	}
+}
+
+static void nvmem_cell_add(struct nvmem_cell *cell)
+{
+	mutex_lock(&nvmem_cells_mutex);
+	list_add_tail(&cell->node, &nvmem_cells);
+	mutex_unlock(&nvmem_cells_mutex);
+}
+
+static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
+				   const struct nvmem_cell_info *info,
+				   struct nvmem_cell *cell)
+{
+	cell->nvmem = nvmem;
+	cell->offset = info->offset;
+	cell->bytes = info->bytes;
+	cell->name = info->name;
+
+	cell->bit_offset = info->bit_offset;
+	cell->nbits = info->nbits;
+
+	if (cell->nbits)
+		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
+					   BITS_PER_BYTE);
+
+	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
+		dev_err(&nvmem->dev,
+			"cell %s unaligned to nvmem stride %d\n",
+			cell->name, nvmem->stride);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int nvmem_add_cells(struct nvmem_device *nvmem,
+			   const struct nvmem_config *cfg)
+{
+	struct nvmem_cell **cells;
+	const struct nvmem_cell_info *info = cfg->cells;
+	int i, rval;
+
+	cells = kcalloc(cfg->ncells, sizeof(*cells), GFP_KERNEL);
+	if (!cells)
+		return -ENOMEM;
+
+	for (i = 0; i < cfg->ncells; i++) {
+		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
+		if (!cells[i]) {
+			rval = -ENOMEM;
+			goto err;
+		}
+
+		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
+		if (IS_ERR_VALUE(rval)) {
+			kfree(cells[i]);
+			goto err;
+		}
+
+		nvmem_cell_add(cells[i]);
+	}
+
+	nvmem->ncells = cfg->ncells;
+	/* remove tmp array */
+	kfree(cells);
+
+	return 0;
+err:
+	while (--i)
+		nvmem_cell_drop(cells[i]);
+
+	return rval;
+}
+
+/**
+ * nvmem_register() - Register a nvmem device for given nvmem_config.
+ * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
+ *
+ * @config: nvmem device configuration with which nvmem device is created.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
+ * on success.
+ */
+
+struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+{
+	struct nvmem_device *nvmem;
+	struct device_node *np;
+	struct regmap *rm;
+	int rval;
+
+	if (!config->dev)
+		return ERR_PTR(-EINVAL);
+
+	rm = dev_get_regmap(config->dev, NULL);
+	if (!rm) {
+		dev_err(config->dev, "Regmap not found\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
+	if (!nvmem)
+		return ERR_PTR(-ENOMEM);
+
+	rval  = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
+	if (rval < 0) {
+		kfree(nvmem);
+		return ERR_PTR(rval);
+	}
+
+	nvmem->id = rval;
+	nvmem->regmap = rm;
+	nvmem->owner = config->owner;
+	nvmem->stride = regmap_get_reg_stride(rm);
+	nvmem->word_size = regmap_get_val_bytes(rm);
+	nvmem->size = regmap_get_max_register(rm) + nvmem->stride;
+	nvmem->dev.type = &nvmem_provider_type;
+	nvmem->dev.bus = &nvmem_bus_type;
+	nvmem->dev.parent = config->dev;
+	np = config->dev->of_node;
+	nvmem->dev.of_node = np;
+	dev_set_name(&nvmem->dev, "%s%d",
+		     config->name ? : "nvmem", config->id);
+
+	nvmem->read_only = of_property_read_bool(np, "read-only") |
+			   config->read_only;
+
+	nvmem->dev.groups = nvmem->read_only ? nvmem_ro_dev_groups :
+					       nvmem_rw_dev_groups;
+
+	device_initialize(&nvmem->dev);
+
+	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
+
+	rval = device_add(&nvmem->dev);
+	if (rval) {
+		ida_simple_remove(&nvmem_ida, nvmem->id);
+		kfree(nvmem);
+		return ERR_PTR(rval);
+	}
+
+	if (config->cells)
+		nvmem_add_cells(nvmem, config);
+
+	return nvmem;
+}
+EXPORT_SYMBOL_GPL(nvmem_register);
+
+/**
+ * nvmem_unregister() - Unregister previously registered nvmem device
+ *
+ * @nvmem: Pointer to previously registered nvmem device.
+ *
+ * Return: Negative on error or zero on success.
+ */
+int nvmem_unregister(struct nvmem_device *nvmem)
+{
+	mutex_lock(&nvmem_mutex);
+	if (nvmem->users) {
+		mutex_unlock(&nvmem_mutex);
+		return -EBUSY;
+	}
+	mutex_unlock(&nvmem_mutex);
+
+	nvmem_device_remove_all_cells(nvmem);
+	device_del(&nvmem->dev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvmem_unregister);
+
+static struct nvmem_device *__nvmem_device_get(struct device_node *np,
+					       struct nvmem_cell **cellp,
+					       const char *cell_id)
+{
+	struct nvmem_device *nvmem = NULL;
+
+	mutex_lock(&nvmem_mutex);
+
+	if (np) {
+		nvmem = of_nvmem_find(np);
+		if (!nvmem) {
+			mutex_unlock(&nvmem_mutex);
+			return ERR_PTR(-EPROBE_DEFER);
+		}
+	} else {
+		struct nvmem_cell *cell = nvmem_find_cell(cell_id);
+
+		if (cell) {
+			nvmem = cell->nvmem;
+			*cellp = cell;
+		}
+
+		if (!nvmem) {
+			mutex_unlock(&nvmem_mutex);
+			return ERR_PTR(-ENOENT);
+		}
+	}
+
+	nvmem->users++;
+	mutex_unlock(&nvmem_mutex);
+
+	if (!try_module_get(nvmem->owner)) {
+		dev_err(&nvmem->dev,
+			"could not increase module refcount for cell %s\n",
+			nvmem->name);
+
+		mutex_lock(&nvmem_mutex);
+		nvmem->users--;
+		mutex_unlock(&nvmem_mutex);
+
+		return ERR_PTR(-EINVAL);
+	}
+
+	return nvmem;
+}
+
+static void __nvmem_device_put(struct nvmem_device *nvmem)
+{
+	module_put(nvmem->owner);
+	mutex_lock(&nvmem_mutex);
+	nvmem->users--;
+	mutex_unlock(&nvmem_mutex);
+}
+
+static int nvmem_match(struct device *dev, void *data)
+{
+	return !strcmp(dev_name(dev), data);
+}
+
+static struct nvmem_device *nvmem_find(const char *name)
+{
+	struct device *d;
+
+	d = bus_find_device(&nvmem_bus_type, NULL, (void *)name, nvmem_match);
+
+	if (!d)
+		return NULL;
+
+	return to_nvmem_device(d);
+}
+
+#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+/**
+ * of_nvmem_device_get() - Get nvmem device from a given id
+ *
+ * @np: Device tree node that uses the nvmem device
+ * @id: nvmem name from nvmem-names property.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success.
+ */
+struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
+{
+
+	struct device_node *nvmem_np;
+	int index;
+
+	index = of_property_match_string(np, "nvmem-names", id);
+
+	nvmem_np = of_parse_phandle(np, "nvmem", index);
+	if (!nvmem_np)
+		return ERR_PTR(-EINVAL);
+
+	return __nvmem_device_get(nvmem_np, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(of_nvmem_device_get);
+#endif
+
+/**
+ * nvmem_device_get() - Get nvmem device from a given id
+ *
+ * @dev : Device that uses the nvmem device
+ * @id: nvmem name from nvmem-names property.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success.
+ */
+struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
+{
+	if (dev->of_node) { /* try dt first */
+		struct nvmem_device *nvmem;
+
+		nvmem = of_nvmem_device_get(dev->of_node, dev_name);
+
+		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
+			return nvmem;
+
+	}
+
+	return nvmem_find(dev_name);
+}
+EXPORT_SYMBOL_GPL(nvmem_device_get);
+
+static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
+{
+	struct nvmem_device **nvmem = res;
+
+	if (WARN_ON(!nvmem || !*nvmem))
+		return 0;
+
+	return *nvmem == data;
+}
+
+static void devm_nvmem_device_release(struct device *dev, void *res)
+{
+	nvmem_device_put(*(struct nvmem_device **)res);
+}
+
+/**
+ * devm_nvmem_device_put() - put an already-obtained nvmem device
+ *
+ * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(),
+ * that needs to be released.
+ */
+void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
+{
+	int ret;
+
+	ret = devres_release(dev, devm_nvmem_device_release,
+			     devm_nvmem_device_match, nvmem);
+
+	WARN_ON(ret);
+}
+EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
+
+/**
+ * nvmem_device_put() - put an already-obtained nvmem device
+ *
+ * @nvmem: pointer to nvmem device that needs to be released.
+ */
+void nvmem_device_put(struct nvmem_device *nvmem)
+{
+	__nvmem_device_put(nvmem);
+}
+EXPORT_SYMBOL_GPL(nvmem_device_put);
+
+/**
+ * devm_nvmem_device_get() - Get nvmem device for a given id
+ *
+ * @dev: Device that uses the nvmem device
+ * @id: nvmem name in nvmems property.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success.  The nvmem_device will be released automatically once the
+ * device is freed.
+ */
+struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
+{
+	struct nvmem_device **ptr, *nvmem;
+
+	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	nvmem = nvmem_device_get(dev, id);
+	if (!IS_ERR(nvmem)) {
+		*ptr = nvmem;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return nvmem;
+}
+EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
+
+static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
+{
+	struct nvmem_cell *cell = NULL;
+	struct nvmem_device *nvmem;
+
+	nvmem = __nvmem_device_get(NULL, &cell, cell_id);
+	if (IS_ERR(nvmem))
+		return ERR_CAST(nvmem);
+
+	return cell;
+}
+
+#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+/**
+ * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
+ *
+ * @np: Device tree node that uses the nvmem cell
+ * @name: nvmem cell name from nvmem-cell-names property.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer
+ * to a struct nvmem_cell.  The nvmem_cell will be freed by the
+ * nvmem_cell_put().
+ */
+struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
+					    const char *name)
+{
+	struct device_node *cell_np, *nvmem_np;
+	struct nvmem_cell *cell;
+	struct nvmem_device *nvmem;
+	const __be32 *addr;
+	int rval, len, index;
+
+	index = of_property_match_string(np, "nvmem-cell-names", name);
+
+	cell_np = of_parse_phandle(np, "nvmem-cells", index);
+	if (!cell_np)
+		return ERR_PTR(-EINVAL);
+
+	nvmem_np = of_get_next_parent(cell_np);
+	if (!nvmem_np)
+		return ERR_PTR(-EINVAL);
+
+	nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
+	if (IS_ERR(nvmem))
+		return ERR_CAST(nvmem);
+
+	addr = of_get_property(cell_np, "reg", &len);
+	if (!addr || (len < 2 * sizeof(u32))) {
+		dev_err(&nvmem->dev, "nvmem: invalid reg on %s\n",
+			cell_np->full_name);
+		rval  = -EINVAL;
+		goto err_mem;
+	}
+
+	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+	if (!cell) {
+		rval = -ENOMEM;
+		goto err_mem;
+	}
+
+	cell->nvmem = nvmem;
+	cell->offset = be32_to_cpup(addr++);
+	cell->bytes = be32_to_cpup(addr);
+	cell->name = cell_np->name;
+
+	addr = of_get_property(cell_np, "bits", &len);
+	if (addr && len == (2 * sizeof(u32))) {
+		cell->bit_offset = be32_to_cpup(addr++);
+		cell->nbits = be32_to_cpup(addr);
+	}
+
+	if (cell->nbits)
+		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
+					   BITS_PER_BYTE);
+
+	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
+			dev_err(&nvmem->dev,
+				"cell %s unaligned to nvmem stride %d\n",
+				cell->name, nvmem->stride);
+		rval  = -EINVAL;
+		goto err_sanity;
+	}
+
+	nvmem_cell_add(cell);
+
+	return cell;
+
+err_sanity:
+	kfree(cell);
+
+err_mem:
+	__nvmem_device_put(nvmem);
+
+	return ERR_PTR(rval);
+}
+EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
+#endif
+
+/**
+ * nvmem_cell_get() - Get nvmem cell of device from a given cell name
+ *
+ * @dev: Device that uses the nvmem cell
+ * @cell_id: nvmem cell name to get.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer
+ * to a struct nvmem_cell.  The nvmem_cell will be freed by the
+ * nvmem_cell_put().
+ */
+struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
+{
+	struct nvmem_cell *cell;
+
+	if (dev->of_node) { /* try dt first */
+		cell = of_nvmem_cell_get(dev->of_node, cell_id);
+		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
+			return cell;
+	}
+
+	return nvmem_cell_get_from_list(cell_id);
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_get);
+
+static void devm_nvmem_cell_release(struct device *dev, void *res)
+{
+	nvmem_cell_put(*(struct nvmem_cell **)res);
+}
+
+/**
+ * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
+ *
+ * @dev: Device that uses the nvmem cell
+ * @id: nvmem id in nvmem-names property.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer
+ * to a struct nvmem_cell.  The nvmem_cell will be freed by the
+ * automatically once the device is freed.
+ */
+struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
+{
+	struct nvmem_cell **ptr, *cell;
+
+	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	cell = nvmem_cell_get(dev, id);
+	if (!IS_ERR(cell)) {
+		*ptr = cell;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return cell;
+}
+EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
+
+static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
+{
+	struct nvmem_cell **c = res;
+
+	if (WARN_ON(!c || !*c))
+		return 0;
+
+	return *c == data;
+}
+
+/**
+ * devm_nvmem_cell_put() - Release previously allocated nvmem cell
+ * from devm_nvmem_cell_get.
+ *
+ * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get()
+ */
+void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
+{
+	int ret;
+
+	ret = devres_release(dev, devm_nvmem_cell_release,
+				devm_nvmem_cell_match, cell);
+
+	WARN_ON(ret);
+}
+EXPORT_SYMBOL(devm_nvmem_cell_put);
+
+/**
+ * nvmem_cell_put() - Release previously allocated nvmem cell.
+ *
+ * @cell: Previously allocated nvmem cell by nvmem_cell_get()
+ */
+void nvmem_cell_put(struct nvmem_cell *cell)
+{
+	struct nvmem_device *nvmem = cell->nvmem;
+
+	__nvmem_device_put(nvmem);
+	nvmem_cell_drop(cell);
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_put);
+
+static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell,
+						    void *buf)
+{
+	u8 *p, *b;
+	int i, bit_offset = cell->bit_offset;
+
+	p = b = buf;
+	if (bit_offset) {
+		/* First shift */
+		*b++ >>= bit_offset;
+
+		/* setup rest of the bytes if any */
+		for (i = 1; i < cell->bytes; i++) {
+			/* Get bits from next byte and shift them towards msb */
+			*p |= *b << (BITS_PER_BYTE - bit_offset);
+
+			p = b;
+			*b++ >>= bit_offset;
+		}
+
+		/* result fits in less bytes */
+		if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
+			*p-- = 0;
+	}
+	/* clear msb bits if any leftover in the last byte */
+	*p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
+}
+
+static int __nvmem_cell_read(struct nvmem_device *nvmem,
+		      struct nvmem_cell *cell,
+		      void *buf, size_t *len)
+{
+	int rc;
+
+	rc = regmap_raw_read(nvmem->regmap, cell->offset, buf, cell->bytes);
+
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	/* shift bits in-place */
+	if (cell->bit_offset || cell->nbits)
+		nvmem_shift_read_buffer_in_place(cell, buf);
+
+	*len = cell->bytes;
+
+	return 0;
+}
+
+/**
+ * nvmem_cell_read() - Read a given nvmem cell
+ *
+ * @cell: nvmem cell to be read.
+ * @len: pointer to length of cell which will be populated on successful read.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a char * buffer on success.
+ * The buffer should be freed by the consumer with a kfree().
+ */
+void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
+{
+	struct nvmem_device *nvmem = cell->nvmem;
+	u8 *buf;
+	int rc;
+
+	if (!nvmem || !nvmem->regmap)
+		return ERR_PTR(-EINVAL);
+
+	buf = kzalloc(cell->bytes, GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	rc = __nvmem_cell_read(nvmem, cell, buf, len);
+	if (IS_ERR_VALUE(rc)) {
+		kfree(buf);
+		return ERR_PTR(rc);
+	}
+
+	return buf;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read);
+
+static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
+						    u8 *_buf, int len)
+{
+	struct nvmem_device *nvmem = cell->nvmem;
+	int i, rc, nbits, bit_offset = cell->bit_offset;
+	u8 v, *p, *buf, *b, pbyte, pbits;
+
+	nbits = cell->nbits;
+	buf = kzalloc(cell->bytes, GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	memcpy(buf, _buf, len);
+	p = b = buf;
+
+	if (bit_offset) {
+		pbyte = *b;
+		*b <<= bit_offset;
+
+		/* setup the first byte with lsb bits from nvmem */
+		rc = regmap_raw_read(nvmem->regmap, cell->offset, &v, 1);
+		*b++ |= GENMASK(bit_offset - 1, 0) & v;
+
+		/* setup rest of the byte if any */
+		for (i = 1; i < cell->bytes; i++) {
+			/* Get last byte bits and shift them towards lsb */
+			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
+			pbyte = *b;
+			p = b;
+			*b <<= bit_offset;
+			*b++ |= pbits;
+		}
+	}
+
+	/* if it's not end on byte boundary */
+	if ((nbits + bit_offset) % BITS_PER_BYTE) {
+		/* setup the last byte with msb bits from nvmem */
+		rc = regmap_raw_read(nvmem->regmap,
+				    cell->offset + cell->bytes - 1, &v, 1);
+		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
+
+	}
+
+	return buf;
+}
+
+/**
+ * nvmem_cell_write() - Write to a given nvmem cell
+ *
+ * @cell: nvmem cell to be written.
+ * @buf: Buffer to be written.
+ * @len: length of buffer to be written to nvmem cell.
+ *
+ * Return: length of bytes written or negative on failure.
+ */
+int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
+{
+	struct nvmem_device *nvmem = cell->nvmem;
+	int rc;
+
+	if (!nvmem || !nvmem->regmap || nvmem->read_only ||
+	    (cell->bit_offset == 0 && len != cell->bytes))
+		return -EINVAL;
+
+	if (cell->bit_offset || cell->nbits) {
+		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
+		if (IS_ERR(buf))
+			return PTR_ERR(buf);
+	}
+
+	rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes);
+
+	/* free the tmp buffer */
+	if (cell->bit_offset)
+		kfree(buf);
+
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_write);
+
+/**
+ * nvmem_device_cell_read() - Read a given nvmem device and cell
+ *
+ * @nvmem: nvmem device to read from.
+ * @info: nvmem cell info to be read.
+ * @buf: buffer pointer which will be populated on successful read.
+ *
+ * Return: length of successful bytes read on success and negative
+ * error code on error.
+ */
+ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
+			   struct nvmem_cell_info *info, void *buf)
+{
+	struct nvmem_cell cell;
+	int rc;
+	ssize_t len;
+
+	if (!nvmem || !nvmem->regmap)
+		return -EINVAL;
+
+	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
+
+/**
+ * nvmem_device_cell_write() - Write cell to a given nvmem device
+ *
+ * @nvmem: nvmem device to be written to.
+ * @info: nvmem cell info to be written
+ * @buf: buffer to be written to cell.
+ *
+ * Return: length of bytes written or negative error code on failure.
+ */
+int nvmem_device_cell_write(struct nvmem_device *nvmem,
+			    struct nvmem_cell_info *info, void *buf)
+{
+	struct nvmem_cell cell;
+	int rc;
+
+	if (!nvmem || !nvmem->regmap)
+		return -EINVAL;
+
+	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	return nvmem_cell_write(&cell, buf, cell.bytes);
+}
+EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
+
+/**
+ * nvmem_device_read() - Read from a given nvmem device
+ *
+ * @nvmem: nvmem device to read from.
+ * @offset: offset in nvmem device.
+ * @bytes: number of bytes to read.
+ * @buf: buffer pointer which will be populated on successful read.
+ *
+ * Return: length of successful bytes read on success and negative
+ * error code on error.
+ */
+int nvmem_device_read(struct nvmem_device *nvmem,
+		      unsigned int offset,
+		      size_t bytes, void *buf)
+{
+	int rc;
+
+	if (!nvmem || !nvmem->regmap)
+		return -EINVAL;
+
+	rc = regmap_raw_read(nvmem->regmap, offset, buf, bytes);
+
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	return bytes;
+}
+EXPORT_SYMBOL_GPL(nvmem_device_read);
+
+/**
+ * nvmem_device_write() - Write cell to a given nvmem device
+ *
+ * @nvmem: nvmem device to be written to.
+ * @offset: offset in nvmem device.
+ * @bytes: number of bytes to write.
+ * @buf: buffer to be written.
+ *
+ * Return: length of bytes written or negative error code on failure.
+ */
+int nvmem_device_write(struct nvmem_device *nvmem,
+		       unsigned int offset,
+		       size_t bytes, void *buf)
+{
+	int rc;
+
+	if (!nvmem || !nvmem->regmap)
+		return -EINVAL;
+
+	rc = regmap_raw_write(nvmem->regmap, offset, buf, bytes);
+
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+
+	return bytes;
+}
+EXPORT_SYMBOL_GPL(nvmem_device_write);
+
+static int __init nvmem_init(void)
+{
+	return bus_register(&nvmem_bus_type);
+}
+
+static void __exit nvmem_exit(void)
+{
+	bus_unregister(&nvmem_bus_type);
+}
+
+subsys_initcall(nvmem_init);
+module_exit(nvmem_exit);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
+MODULE_DESCRIPTION("nvmem Driver Core");
+MODULE_LICENSE("GPL v2");

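The consumer API added in core.c above (nvmem_cell_get(), nvmem_cell_read(), nvmem_cell_put()) is all a driver needs to pull a named cell out of a provider. A minimal, hedged sketch of such a consumer; the cell name "calibration" and example_probe() are illustrative and not part of this series:

/* Hedged sketch of an nvmem consumer; names are illustrative only. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

static int example_probe(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t len;
	u8 *value;

	cell = nvmem_cell_get(dev, "calibration");
	if (IS_ERR(cell))
		return PTR_ERR(cell);		/* may be -EPROBE_DEFER */

	value = nvmem_cell_read(cell, &len);	/* kmalloc'ed buffer on success */
	nvmem_cell_put(cell);
	if (IS_ERR(value))
		return PTR_ERR(value);

	/* ... consume len bytes of value ... */
	kfree(value);
	return 0;
}

nvmem_cell_get() tries the device-tree path first (nvmem-cells/nvmem-cell-names) and falls back to the global cell list, exactly as shown in the core code above.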
+ 85 - 0
drivers/nvmem/qfprom.c

@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+static struct regmap_config qfprom_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 8,
+	.reg_stride = 1,
+};
+
+static struct nvmem_config econfig = {
+	.name = "qfprom",
+	.owner = THIS_MODULE,
+};
+
+static int qfprom_remove(struct platform_device *pdev)
+{
+	struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+	return nvmem_unregister(nvmem);
+}
+
+static int qfprom_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct nvmem_device *nvmem;
+	struct regmap *regmap;
+	void __iomem *base;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	qfprom_regmap_config.max_register = resource_size(res) - 1;
+
+	regmap = devm_regmap_init_mmio(dev, base, &qfprom_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(dev, "regmap init failed\n");
+		return PTR_ERR(regmap);
+	}
+	econfig.dev = dev;
+	nvmem = nvmem_register(&econfig);
+	if (IS_ERR(nvmem))
+		return PTR_ERR(nvmem);
+
+	platform_set_drvdata(pdev, nvmem);
+
+	return 0;
+}
+
+static const struct of_device_id qfprom_of_match[] = {
+	{ .compatible = "qcom,qfprom",},
+	{/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, qfprom_of_match);
+
+static struct platform_driver qfprom_driver = {
+	.probe = qfprom_probe,
+	.remove = qfprom_remove,
+	.driver = {
+		.name = "qcom,qfprom",
+		.of_match_table = qfprom_of_match,
+	},
+};
+module_platform_driver(qfprom_driver);
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm QFPROM driver");
+MODULE_LICENSE("GPL v2");

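Besides cells, the new core also exports whole-device accessors (nvmem_device_get(), nvmem_device_read(), nvmem_device_put()), which fit raw providers like the qfprom above. A hedged sketch of reading raw fuse bytes; the device name "qfprom0" is an assumption derived from dev_set_name("%s%d", ...) with the zero-default id in econfig:

/* Hedged sketch, not part of the patch: raw reads through the device API. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>

static int example_read_fuses(struct device *dev)
{
	struct nvmem_device *nvmem;
	u8 fuses[16];
	int ret;

	nvmem = nvmem_device_get(dev, "qfprom0");	/* assumed device name */
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	/* Read 16 bytes from offset 0; returns bytes read or a negative errno. */
	ret = nvmem_device_read(nvmem, 0, sizeof(fuses), fuses);

	nvmem_device_put(nvmem);
	return ret < 0 ? ret : 0;
}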
+ 171 - 0
drivers/nvmem/sunxi_sid.c

@@ -0,0 +1,171 @@
+/*
+ * Allwinner sunXi SoCs Security ID support.
+ *
+ * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl>
+ * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+
+
+static struct nvmem_config econfig = {
+	.name = "sunxi-sid",
+	.read_only = true,
+	.owner = THIS_MODULE,
+};
+
+struct sunxi_sid {
+	void __iomem		*base;
+};
+
+/* We read the entire key, due to a 32 bit read alignment requirement. Since we
+ * want to return the requested byte, this results in somewhat slower code and
+ * uses 4 times more reads than needed but keeps code simpler. Since the SID is
+ * only very rarely probed, this is not really an issue.
+ */
+static u8 sunxi_sid_read_byte(const struct sunxi_sid *sid,
+			      const unsigned int offset)
+{
+	u32 sid_key;
+
+	sid_key = ioread32be(sid->base + round_down(offset, 4));
+	sid_key >>= (offset % 4) * 8;
+
+	return sid_key; /* Only return the last byte */
+}
+
+static int sunxi_sid_read(void *context,
+			    const void *reg, size_t reg_size,
+			    void *val, size_t val_size)
+{
+	struct sunxi_sid *sid = context;
+	unsigned int offset = *(u32 *)reg;
+	u8 *buf = val;
+
+	while (val_size) {
+		*buf++ = sunxi_sid_read_byte(sid, offset);
+		val_size--;
+		offset++;
+	}
+
+	return 0;
+}
+
+static int sunxi_sid_write(void *context, const void *data, size_t count)
+{
+	/* Unimplemented, dummy to keep regmap core happy */
+	return 0;
+}
+
+static struct regmap_bus sunxi_sid_bus = {
+	.read = sunxi_sid_read,
+	.write = sunxi_sid_write,
+	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static bool sunxi_sid_writeable_reg(struct device *dev, unsigned int reg)
+{
+	return false;
+}
+
+static struct regmap_config sunxi_sid_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 8,
+	.reg_stride = 1,
+	.writeable_reg = sunxi_sid_writeable_reg,
+};
+
+static int sunxi_sid_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct nvmem_device *nvmem;
+	struct regmap *regmap;
+	struct sunxi_sid *sid;
+	int i, size;
+	char *randomness;
+
+	sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL);
+	if (!sid)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	sid->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(sid->base))
+		return PTR_ERR(sid->base);
+
+	size = resource_size(res) - 1;
+	sunxi_sid_regmap_config.max_register = size;
+
+	regmap = devm_regmap_init(dev, &sunxi_sid_bus, sid,
+				  &sunxi_sid_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(dev, "regmap init failed\n");
+		return PTR_ERR(regmap);
+	}
+
+	econfig.dev = dev;
+	nvmem = nvmem_register(&econfig);
+	if (IS_ERR(nvmem))
+		return PTR_ERR(nvmem);
+
+	randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL);
+	for (i = 0; i < size; i++)
+		randomness[i] = sunxi_sid_read_byte(sid, i);
+
+	add_device_randomness(randomness, size);
+	kfree(randomness);
+
+	platform_set_drvdata(pdev, nvmem);
+
+	return 0;
+}
+
+static int sunxi_sid_remove(struct platform_device *pdev)
+{
+	struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+	return nvmem_unregister(nvmem);
+}
+
+static const struct of_device_id sunxi_sid_of_match[] = {
+	{ .compatible = "allwinner,sun4i-a10-sid" },
+	{ .compatible = "allwinner,sun7i-a20-sid" },
+	{/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
+
+static struct platform_driver sunxi_sid_driver = {
+	.probe = sunxi_sid_probe,
+	.remove = sunxi_sid_remove,
+	.driver = {
+		.name = "eeprom-sunxi-sid",
+		.of_match_table = sunxi_sid_of_match,
+	},
+};
+module_platform_driver(sunxi_sid_driver);
+
+MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>");
+MODULE_DESCRIPTION("Allwinner sunxi security id driver");
+MODULE_LICENSE("GPL");

+ 2 - 3
drivers/rtc/rtc-ds1374.c

@@ -666,9 +666,8 @@ static int ds1374_remove(struct i2c_client *client)
 #ifdef CONFIG_RTC_DRV_DS1374_WDT
 	int res;
 
 
-	res = misc_deregister(&ds1374_miscdev);
-	if (!res)
-		ds1374_miscdev.parent = NULL;
+	misc_deregister(&ds1374_miscdev);
+	ds1374_miscdev.parent = NULL;
 	unregister_reboot_notifier(&ds1374_wdt_notifier);
 #endif
 
 

Too many files changed in this diff, so some files are not shown.