
Merge tag 'char-misc-4.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc updates from Greg KH:
 "Here is the big char/misc driver update for 4.6-rc1.

  The majority of the patches here is hwtracing and some new mic
  drivers, but there's a lot of other driver updates as well.  Full
  details in the shortlog.

  All have been in linux-next for a while with no reported issues"

* tag 'char-misc-4.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (238 commits)
  goldfish: Fix build error of missing ioremap on UM
  nvmem: mediatek: Fix later provider initialization
  nvmem: imx-ocotp: Fix return value of imx_ocotp_read
  nvmem: Fix dependencies for !HAS_IOMEM archs
  char: genrtc: replace blacklist with whitelist
  drivers/hwtracing: make coresight-etm-perf.c explicitly non-modular
  drivers: char: mem: fix IS_ERROR_VALUE usage
  char: xillybus: Fix internal data structure initialization
  pch_phub: return -ENODATA if ROM can't be mapped
  Drivers: hv: vmbus: Support kexec on ws2012 r2 and above
  Drivers: hv: vmbus: Support handling messages on multiple CPUs
  Drivers: hv: utils: Remove util transport handler from list if registration fails
  Drivers: hv: util: Pass the channel information during the init call
  Drivers: hv: vmbus: avoid unneeded compiler optimizations in vmbus_wait_for_unload()
  Drivers: hv: vmbus: remove code duplication in message handling
  Drivers: hv: vmbus: avoid wait_for_completion() on crash
  Drivers: hv: vmbus: don't loose HVMSG_TIMER_EXPIRED messages
  misc: at24: replace memory_accessor with nvmem_device_read
  eeprom: 93xx46: extend driver to plug into the NVMEM framework
  eeprom: at25: extend driver to plug into the NVMEM framework
  ...
Linus Torvalds 9 years ago
commit 8eee93e257
100 changed files with 4982 additions and 2697 deletions
  1. +14 -0  Documentation/ABI/stable/sysfs-bus-vmbus
  2. +17 -0  Documentation/devicetree/bindings/goldfish/pipe.txt
  3. +25 -0  Documentation/devicetree/bindings/misc/eeprom-93xx46.txt
  4. +28 -0  Documentation/devicetree/bindings/nvmem/lpc1857-eeprom.txt
  5. +36 -0  Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
  6. +30 -24  Documentation/mic/mic_overview.txt
  7. +1 -1  Documentation/mic/mpssd/mpss
  8. +1 -1  Documentation/mic/mpssd/mpssd.c
  9. +6 -6  Documentation/misc-devices/mei/mei.txt
  10. +6 -0  MAINTAINERS
  11. +1 -2  arch/arm/boot/dts/am57xx-beagle-x15.dts
  12. +3 -2  arch/arm/mach-davinci/board-mityomapl138.c
  13. +2 -2  arch/arm/mach-davinci/common.c
  14. +22 -4  drivers/android/binder.c
  15. +4 -5  drivers/base/firmware_class.c
  16. +2 -1  drivers/char/Kconfig
  17. +1 -1  drivers/char/mem.c
  18. +6 -6  drivers/char/nvram.c
  19. +2 -3  drivers/char/nwbutton.c
  20. +226 -171  drivers/char/ppdev.c
  21. +1 -3  drivers/char/raw.c
  22. +3 -1  drivers/char/xillybus/xillybus_core.c
  23. +2 -2  drivers/extcon/extcon-arizona.c
  24. +1 -1  drivers/extcon/extcon-gpio.c
  25. +3 -0  drivers/extcon/extcon-max14577.c
  26. +11 -1  drivers/extcon/extcon-max77693.c
  27. +4 -1  drivers/extcon/extcon-max77843.c
  28. +3 -0  drivers/extcon/extcon-max8997.c
  29. +52 -2  drivers/extcon/extcon-palmas.c
  30. +6 -2  drivers/extcon/extcon-rt8973a.c
  31. +6 -2  drivers/extcon/extcon-sm5502.c
  32. +31 -5  drivers/hv/channel.c
  33. +213 -49  drivers/hv/channel_mgmt.c
  34. +14 -6  drivers/hv/connection.c
  35. +18 -18  drivers/hv/hv.c
  36. +1 -1  drivers/hv/hv_fcopy.c
  37. +1 -1  drivers/hv/hv_kvp.c
  38. +1 -1  drivers/hv/hv_snapshot.c
  39. +1 -0  drivers/hv/hv_util.c
  40. +3 -0  drivers/hv/hv_utils_transport.c
  41. +30 -6  drivers/hv/hyperv_vmbus.h
  42. +13 -18  drivers/hv/ring_buffer.c
  43. +51 -66  drivers/hv/vmbus_drv.c
  44. +1 -0  drivers/hwtracing/coresight/Kconfig
  45. +3 -1  drivers/hwtracing/coresight/Makefile
  46. +264 -29  drivers/hwtracing/coresight/coresight-etb10.c
  47. +393 -0  drivers/hwtracing/coresight/coresight-etm-perf.c
  48. +32 -0  drivers/hwtracing/coresight/coresight-etm-perf.h
  49. +96 -46  drivers/hwtracing/coresight/coresight-etm.h
  50. +1272 -0  drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
  51. +349 -1388  drivers/hwtracing/coresight/coresight-etm3x.c
  52. +14 -23  drivers/hwtracing/coresight/coresight-etm4x.c
  53. +4 -17  drivers/hwtracing/coresight/coresight-funnel.c
  54. +15 -0  drivers/hwtracing/coresight/coresight-priv.h
  55. +2 -17  drivers/hwtracing/coresight/coresight-replicator-qcom.c
  56. +3 -22  drivers/hwtracing/coresight/coresight-replicator.c
  57. +7 -28  drivers/hwtracing/coresight/coresight-tmc.c
  58. +5 -18  drivers/hwtracing/coresight/coresight-tpiu.c
  59. +279 -109  drivers/hwtracing/coresight/coresight.c
  60. +1 -2  drivers/hwtracing/coresight/of_coresight.c
  61. +1 -0  drivers/hwtracing/intel_th/Kconfig
  62. +27 -3  drivers/hwtracing/intel_th/core.c
  63. +13 -19  drivers/hwtracing/intel_th/gth.c
  64. +0 -3  drivers/hwtracing/intel_th/gth.h
  65. +41 -0  drivers/hwtracing/intel_th/intel_th.h
  66. +4 -5  drivers/hwtracing/intel_th/msu.c
  67. +10 -2  drivers/hwtracing/intel_th/pci.c
  68. +8 -3  drivers/hwtracing/intel_th/sth.c
  69. +16 -0  drivers/hwtracing/stm/Kconfig
  70. +2 -0  drivers/hwtracing/stm/Makefile
  71. +136 -39  drivers/hwtracing/stm/core.c
  72. +62 -9  drivers/hwtracing/stm/dummy_stm.c
  73. +130 -0  drivers/hwtracing/stm/heartbeat.c
  74. +19 -6  drivers/hwtracing/stm/policy.c
  75. +2 -0  drivers/hwtracing/stm/stm.h
  76. +2 -2  drivers/misc/Kconfig
  77. +2 -2  drivers/misc/ad525x_dpot.c
  78. +4 -4  drivers/misc/apds990x.c
  79. +2 -22  drivers/misc/arm-charlcd.c
  80. +4 -4  drivers/misc/bh1770glc.c
  81. +2 -6  drivers/misc/c2port/core.c
  82. +2 -3  drivers/misc/cxl/sysfs.c
  83. +6 -0  drivers/misc/eeprom/Kconfig
  84. +70 -60  drivers/misc/eeprom/at24.c
  85. +68 -80  drivers/misc/eeprom/at25.c
  86. +1 -1  drivers/misc/eeprom/eeprom.c
  87. +269 -63  drivers/misc/eeprom/eeprom_93xx46.c
  88. +1 -1  drivers/misc/genwqe/card_sysfs.c
  89. +6 -3  drivers/misc/ibmasm/ibmasm.h
  90. +4 -4  drivers/misc/lis3lv02d/lis3lv02d_i2c.c
  91. +120 -4  drivers/misc/lkdtm.c
  92. +3 -3  drivers/misc/mei/Kconfig
  93. +0 -1  drivers/misc/mei/Makefile
  94. +46 -84  drivers/misc/mei/amthif.c
  95. +34 -7  drivers/misc/mei/bus-fixup.c
  96. +45 -12  drivers/misc/mei/bus.c
  97. +90 -99  drivers/misc/mei/client.c
  98. +13 -14  drivers/misc/mei/client.h
  99. +55 -10  drivers/misc/mei/debugfs.c
  100. +20 -4  drivers/misc/mei/hbm.c

+ 14 - 0
Documentation/ABI/stable/sysfs-bus-vmbus

@@ -27,3 +27,17 @@ Description:	The mapping of which primary/sub channels are bound to which
 		Virtual Processors.
 		Format: <channel's child_relid:the bound cpu's number>
 Users:		tools/hv/lsvmbus
+
+What:		/sys/bus/vmbus/devices/vmbus_*/device
+Date:		Dec. 2015
+KernelVersion:	4.5
+Contact:	K. Y. Srinivasan <kys@microsoft.com>
+Description:	The 16 bit device ID of the device
+Users:		tools/hv/lsvmbus and user level RDMA libraries
+
+What:		/sys/bus/vmbus/devices/vmbus_*/vendor
+Date:		Dec. 2015
+KernelVersion:	4.5
+Contact:	K. Y. Srinivasan <kys@microsoft.com>
+Description:	The 16 bit vendor ID of the device
+Users:		tools/hv/lsvmbus and user level RDMA libraries
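
Both new attributes are plain sysfs text files, so the listed users (lsvmbus and RDMA libraries) read them with ordinary file I/O. A minimal user-space sketch in C; the vmbus_0 instance name in the path is only a placeholder for whatever device directory actually exists on a given guest:

#include <stdio.h>

int main(void)
{
	const char *attrs[] = { "device", "vendor" };
	char path[128], buf[32];
	int i;

	for (i = 0; i < 2; i++) {
		FILE *f;

		/* instance directory name is an example, not fixed */
		snprintf(path, sizeof(path),
			 "/sys/bus/vmbus/devices/vmbus_0/%s", attrs[i]);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", attrs[i], buf); /* 16 bit ID */
		fclose(f);
	}
	return 0;
}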

+ 17 - 0
Documentation/devicetree/bindings/goldfish/pipe.txt

@@ -0,0 +1,17 @@
+Android Goldfish QEMU Pipe
+
+Android pipe virtual device generated by android emulator.
+
+Required properties:
+
+- compatible : should contain "google,android-pipe" to match emulator
+- reg        : <registers mapping>
+- interrupts : <interrupt mapping>
+
+Example:
+
+	android_pipe@a010000 {
+		compatible = "google,android-pipe";
+		reg = <ff018000 0x2000>;
+		interrupts = <0x12>;
+	};

+ 25 - 0
Documentation/devicetree/bindings/misc/eeprom-93xx46.txt

@@ -0,0 +1,25 @@
+EEPROMs (SPI) compatible with Microchip Technology 93xx46 family.
+
+Required properties:
+- compatible : shall be one of:
+    "atmel,at93c46d"
+    "eeprom-93xx46"
+- data-size : number of data bits per word (either 8 or 16)
+
+Optional properties:
+- read-only : parameter-less property which disables writes to the EEPROM
+- select-gpios : if present, specifies the GPIO that will be asserted prior to
+  each access to the EEPROM (e.g. for SPI bus multiplexing)
+
+Property rules described in Documentation/devicetree/bindings/spi/spi-bus.txt
+apply.  In particular, "reg" and "spi-max-frequency" properties must be given.
+
+Example:
+	eeprom@0 {
+		compatible = "eeprom-93xx46";
+		reg = <0>;
+		spi-max-frequency = <1000000>;
+		spi-cs-high;
+		data-size = <8>;
+		select-gpios = <&gpio4 4 GPIO_ACTIVE_HIGH>;
+	};

+ 28 - 0
Documentation/devicetree/bindings/nvmem/lpc1857-eeprom.txt

@@ -0,0 +1,28 @@
+* NXP LPC18xx EEPROM memory NVMEM driver
+
+Required properties:
+  - compatible: Should be "nxp,lpc1857-eeprom"
+  - reg: Must contain an entry with the physical base address and length
+    for each entry in reg-names.
+  - reg-names: Must include the following entries.
+    - reg: EEPROM registers.
+    - mem: EEPROM address space.
+  - clocks: Must contain an entry for each entry in clock-names.
+  - clock-names: Must include the following entries.
+    - eeprom: EEPROM operating clock.
+  - resets: Should contain a reference to the reset controller asserting
+    the EEPROM in reset.
+  - interrupts: Should contain EEPROM interrupt.
+
+Example:
+
+  eeprom: eeprom@4000e000 {
+    compatible = "nxp,lpc1857-eeprom";
+    reg = <0x4000e000 0x1000>,
+          <0x20040000 0x4000>;
+    reg-names = "reg", "mem";
+    clocks = <&ccu1 CLK_CPU_EEPROM>;
+    clock-names = "eeprom";
+    resets = <&rgu 27>;
+    interrupts = <4>;
+  };

+ 36 - 0
Documentation/devicetree/bindings/nvmem/mtk-efuse.txt

@@ -0,0 +1,36 @@
+= Mediatek MTK-EFUSE device tree bindings =
+
+This binding is intended to represent MTK-EFUSE which is found in most Mediatek SOCs.
+
+Required properties:
+- compatible: should be "mediatek,mt8173-efuse" or "mediatek,efuse"
+- reg: Should contain registers location and length
+
+= Data cells =
+Are child nodes of MTK-EFUSE, bindings of which as described in
+bindings/nvmem/nvmem.txt
+
+Example:
+
+	efuse: efuse@10206000 {
+		compatible = "mediatek,mt8173-efuse";
+		reg	   = <0 0x10206000 0 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		/* Data cells */
+		thermal_calibration: calib@528 {
+			reg = <0x528 0xc>;
+		};
+	};
+
+= Data consumers =
+Are device nodes which consume nvmem data cells.
+
+For example:
+
+	thermal {
+		...
+		nvmem-cells = <&thermal_calibration>;
+		nvmem-cell-names = "calibration";
+	};

+ 30 - 24
Documentation/mic/mic_overview.txt

@@ -12,10 +12,19 @@ for the X100 devices.
 
 Since it is a PCIe card, it does not have the ability to host hardware
 devices for networking, storage and console. We provide these devices
-on X100 coprocessors thus enabling a self-bootable equivalent environment
-for applications. A key benefit of our solution is that it leverages
-the standard virtio framework for network, disk and console devices,
-though in our case the virtio framework is used across a PCIe bus.
+on X100 coprocessors thus enabling a self-bootable equivalent
+environment for applications. A key benefit of our solution is that it
+leverages the standard virtio framework for network, disk and console
+devices, though in our case the virtio framework is used across a PCIe
+bus. A Virtio Over PCIe (VOP) driver allows creating user space
+backends or devices on the host which are used to probe virtio drivers
+for these devices on the MIC card. The existing VRINGH infrastructure
+in the kernel is used to access virtio rings from the host. The card
+VOP driver allows card virtio drivers to communicate with their user
+space backends on the host via a device page. Ring 3 apps on the host
+can add, remove and configure virtio devices. A thin MIC specific
+virtio_config_ops is implemented which is borrowed heavily from
+previous similar implementations in lguest and s390.
 
 MIC PCIe card has a dma controller with 8 channels. These channels are
 shared between the host s/w and the card s/w. 0 to 3 are used by host
@@ -38,7 +47,6 @@ single threaded performance for the host compared to MIC, the ability of
 the host to initiate DMA's to/from the card using the MIC DMA engine and
 the fact that the virtio block storage backend can only be on the host.
 
-                                      |
                +----------+           |             +----------+
                | Card OS  |           |             | Host OS  |
                +----------+           |             +----------+
@@ -47,27 +55,25 @@ the fact that the virtio block storage backend can only be on the host.
         | Virtio| |Virtio  | |Virtio| | |Virtio   |  |Virtio  | |Virtio  |
         | Net   | |Console | |Block | | |Net      |  |Console | |Block   |
         | Driver| |Driver  | |Driver| | |backend  |  |backend | |backend |
-        +-------+ +--------+ +------+ | +---------+  +--------+ +--------+
+        +---+---+ +---+----+ +--+---+ | +---------+  +----+---+ +--------+
             |         |         |     |      |            |         |
             |         |         |     |User  |            |         |
-            |         |         |     |------|------------|---------|-------
-            +-------------------+     |Kernel +--------------------------+
-                      |               |       | Virtio over PCIe IOCTLs  |
-                      |               |       +--------------------------+
-+-----------+         |               |                   |  +-----------+
-| MIC DMA   |         |      +------+ | +------+ +------+ |  | MIC DMA   |
-| Driver    |         |      | SCIF | | | SCIF | | COSM | |  | Driver    |
-+-----------+         |      +------+ | +------+ +--+---+ |  +-----------+
-      |               |         |     |    |        |     |        |
-+---------------+     |      +------+ | +--+---+ +--+---+ | +----------------+
-|MIC virtual Bus|     |      |SCIF  | | |SCIF  | | COSM | | |MIC virtual Bus |
-+---------------+     |      |HW Bus| | |HW Bus| | Bus  | | +----------------+
-      |               |      +------+ | +--+---+ +------+ |              |
-      |               |         |     |       |     |     |              |
-      |   +-----------+---+     |     |       |    +---------------+     |
-      |   |Intel MIC      |     |     |       |    |Intel MIC      |     |
-      +---|Card Driver    |     |     |       |    |Host Driver    |     |
-          +------------+--------+     |       +----+---------------+-----+
+            |         |         |     |------|------------|--+------|-------
+            +---------+---------+     |Kernel                |
+                      |               |                      |
+  +---------+     +---+----+ +------+ | +------+ +------+ +--+---+  +-------+
+  |MIC DMA  |     |  VOP   | | SCIF | | | SCIF | | COSM | | VOP  |  |MIC DMA|
+  +---+-----+     +---+----+ +--+---+ | +--+---+ +--+---+ +------+  +----+--+
+      |               |         |     |    |        |                    |
+  +---+-----+     +---+----+ +--+---+ | +--+---+ +--+---+ +------+  +----+--+
+  |MIC      |     |  VOP   | |SCIF  | | |SCIF  | | COSM | | VOP  |  | MIC   |
+  |HW Bus   |     |  HW Bus| |HW Bus| | |HW Bus| | Bus  | |HW Bus|  |HW Bus |
+  +---------+     +--------+ +--+---+ | +--+---+ +------+ +------+  +-------+
+      |               |         |     |       |     |                    |
+      |   +-----------+--+      |     |       |    +---------------+     |
+      |   |Intel MIC     |      |     |       |    |Intel MIC      |     |
+      |   |Card Driver   |      |     |       |    |Host Driver    |     |
+      +---+--------------+------+     |       +----+---------------+-----+
                  |                    |                   |
              +-------------------------------------------------------------+
              |                                                             |

+ 1 - 1
Documentation/mic/mpssd/mpss

@@ -35,7 +35,7 @@
 
 exec=/usr/sbin/mpssd
 sysfs="/sys/class/mic"
-mic_modules="mic_host mic_x100_dma scif"
+mic_modules="mic_host mic_x100_dma scif vop"
 
 start()
 {

+ 1 - 1
Documentation/mic/mpssd/mpssd.c

@@ -926,7 +926,7 @@ add_virtio_device(struct mic_info *mic, struct mic_device_desc *dd)
 	char path[PATH_MAX];
 	int fd, err;
 
-	snprintf(path, PATH_MAX, "/dev/mic%d", mic->id);
+	snprintf(path, PATH_MAX, "/dev/vop_virtio%d", mic->id);
 	fd = open(path, O_RDWR);
 	if (fd < 0) {
 		mpsslog("Could not open %s %s\n", path, strerror(errno));

+ 6 - 6
Documentation/misc-devices/mei/mei.txt

@@ -231,15 +231,15 @@ IT knows when a platform crashes even when there is a hard failure on the host.
 The Intel AMT Watchdog is composed of two parts:
 	1) Firmware feature - receives the heartbeats
 	   and sends an event when the heartbeats stop.
-	2) Intel MEI driver - connects to the watchdog feature, configures the
-	   watchdog and sends the heartbeats.
+	2) Intel MEI iAMT watchdog driver - connects to the watchdog feature,
+	   configures the watchdog and sends the heartbeats.
 
-The Intel MEI driver uses the kernel watchdog API to configure the Intel AMT
-Watchdog and to send heartbeats to it. The default timeout of the
+The Intel iAMT watchdog MEI driver uses the kernel watchdog API to configure
+the Intel AMT Watchdog and to send heartbeats to it. The default timeout of the
 watchdog is 120 seconds.
 
-If the Intel AMT Watchdog feature does not exist (i.e. the connection failed),
-the Intel MEI driver will disable the sending of heartbeats.
+If the Intel AMT is not enabled in the firmware then the watchdog client won't enumerate
+on the me client bus and watchdog devices won't be exposed.
 
 
 Supported Chipsets
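
Because the iAMT watchdog is exposed through the generic kernel watchdog API, a user-space heartbeat client is just an ordinary watchdog daemon. A minimal sketch, assuming the device enumerates as /dev/watchdog0 (the node name is an assumption; the 120 second figure is the driver default quoted above; error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 120;			/* driver default, in seconds */
	int fd = open("/dev/watchdog0", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/watchdog0");
		return 1;
	}
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* optional: pick a timeout */
	for (;;) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);	/* send a heartbeat */
		sleep(timeout / 2);		/* well before expiry */
	}
}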

+ 6 - 0
MAINTAINERS

@@ -5765,6 +5765,7 @@ S:	Supported
 F:	include/uapi/linux/mei.h
 F:	include/linux/mei_cl_bus.h
 F:	drivers/misc/mei/*
+F:	drivers/watchdog/mei_wdt.c
 F:	Documentation/misc-devices/mei/*
 
 INTEL MIC DRIVERS (mic)
@@ -6598,6 +6599,11 @@ F:	samples/livepatch/
 L:	live-patching@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching.git
 
+LINUX KERNEL DUMP TEST MODULE (LKDTM)
+M:	Kees Cook <keescook@chromium.org>
+S:	Maintained
+F:	drivers/misc/lkdtm.c
+
 LLC (802.2)
 M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 S:	Maintained

+ 1 - 2
arch/arm/boot/dts/am57xx-beagle-x15.dts

@@ -562,8 +562,7 @@
 		extcon_usb2: tps659038_usb {
 			compatible = "ti,palmas-usb-vid";
 			ti,enable-vbus-detection;
-			ti,enable-id-detection;
-			id-gpios = <&gpio7 24 GPIO_ACTIVE_HIGH>;
+			vbus-gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
 		};
 
 	};

+ 3 - 2
arch/arm/mach-davinci/board-mityomapl138.c

@@ -115,13 +115,14 @@ static void mityomapl138_cpufreq_init(const char *partnum)
 static void mityomapl138_cpufreq_init(const char *partnum) { }
 #endif
 
-static void read_factory_config(struct memory_accessor *a, void *context)
+static void read_factory_config(struct nvmem_device *nvmem, void *context)
 {
 	int ret;
 	const char *partnum = NULL;
 	struct davinci_soc_info *soc_info = &davinci_soc_info;
 
-	ret = a->read(a, (char *)&factory_config, 0, sizeof(factory_config));
+	ret = nvmem_device_read(nvmem, 0, sizeof(factory_config),
+				&factory_config);
 	if (ret != sizeof(struct factory_config)) {
 		pr_warn("Read Factory Config Failed: %d\n", ret);
 		goto bad_config;

+ 2 - 2
arch/arm/mach-davinci/common.c

@@ -28,13 +28,13 @@ EXPORT_SYMBOL(davinci_soc_info);
 void __iomem *davinci_intc_base;
 int davinci_intc_type;
 
-void davinci_get_mac_addr(struct memory_accessor *mem_acc, void *context)
+void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context)
 {
 	char *mac_addr = davinci_soc_info.emac_pdata->mac_addr;
 	off_t offset = (off_t)context;
 
 	/* Read MAC addr from EEPROM */
-	if (mem_acc->read(mem_acc, mac_addr, offset, ETH_ALEN) == ETH_ALEN)
+	if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN)
 		pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr);
 }
 

+ 22 - 4
drivers/android/binder.c

@@ -1321,6 +1321,7 @@ static void binder_transaction(struct binder_proc *proc,
 	struct binder_transaction *t;
 	struct binder_work *tcomplete;
 	binder_size_t *offp, *off_end;
+	binder_size_t off_min;
 	struct binder_proc *target_proc;
 	struct binder_thread *target_thread = NULL;
 	struct binder_node *target_node = NULL;
@@ -1522,18 +1523,24 @@ static void binder_transaction(struct binder_proc *proc,
 		goto err_bad_offset;
 	}
 	off_end = (void *)offp + tr->offsets_size;
+	off_min = 0;
 	for (; offp < off_end; offp++) {
 		struct flat_binder_object *fp;
 
 		if (*offp > t->buffer->data_size - sizeof(*fp) ||
+		    *offp < off_min ||
 		    t->buffer->data_size < sizeof(*fp) ||
 		    !IS_ALIGNED(*offp, sizeof(u32))) {
-			binder_user_error("%d:%d got transaction with invalid offset, %lld\n",
-					  proc->pid, thread->pid, (u64)*offp);
+			binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
+					  proc->pid, thread->pid, (u64)*offp,
+					  (u64)off_min,
+					  (u64)(t->buffer->data_size -
+					  sizeof(*fp)));
 			return_error = BR_FAILED_REPLY;
 			goto err_bad_offset;
 		}
 		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+		off_min = *offp + sizeof(struct flat_binder_object);
 		switch (fp->type) {
 		case BINDER_TYPE_BINDER:
 		case BINDER_TYPE_WEAK_BINDER: {
@@ -3593,13 +3600,24 @@ static int binder_transactions_show(struct seq_file *m, void *unused)
 
 static int binder_proc_show(struct seq_file *m, void *unused)
 {
+	struct binder_proc *itr;
 	struct binder_proc *proc = m->private;
 	int do_lock = !binder_debug_no_lock;
+	bool valid_proc = false;
 
 	if (do_lock)
 		binder_lock(__func__);
-	seq_puts(m, "binder proc state:\n");
-	print_binder_proc(m, proc, 1);
+
+	hlist_for_each_entry(itr, &binder_procs, proc_node) {
+		if (itr == proc) {
+			valid_proc = true;
+			break;
+		}
+	}
+	if (valid_proc) {
+		seq_puts(m, "binder proc state:\n");
+		print_binder_proc(m, proc, 1);
+	}
 	if (do_lock)
 		binder_unlock(__func__);
 	return 0;

+ 4 - 5
drivers/base/firmware_class.c

@@ -258,7 +258,7 @@ static void __fw_free_buf(struct kref *ref)
 		vunmap(buf->data);
 		for (i = 0; i < buf->nr_pages; i++)
 			__free_page(buf->pages[i]);
-		kfree(buf->pages);
+		vfree(buf->pages);
 	} else
 #endif
 		vfree(buf->data);
@@ -635,7 +635,7 @@ static ssize_t firmware_loading_store(struct device *dev,
 		if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
 			for (i = 0; i < fw_buf->nr_pages; i++)
 				__free_page(fw_buf->pages[i]);
-			kfree(fw_buf->pages);
+			vfree(fw_buf->pages);
 			fw_buf->pages = NULL;
 			fw_buf->page_array_size = 0;
 			fw_buf->nr_pages = 0;
@@ -746,8 +746,7 @@ static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
 					 buf->page_array_size * 2);
 		struct page **new_pages;
 
-		new_pages = kmalloc(new_array_size * sizeof(void *),
-				    GFP_KERNEL);
+		new_pages = vmalloc(new_array_size * sizeof(void *));
 		if (!new_pages) {
 			fw_load_abort(fw_priv);
 			return -ENOMEM;
@@ -756,7 +755,7 @@ static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
 		       buf->page_array_size * sizeof(void *));
 		memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
 		       (new_array_size - buf->page_array_size));
-		kfree(buf->pages);
+		vfree(buf->pages);
 		buf->pages = new_pages;
 		buf->page_array_size = new_array_size;
 	}

+ 2 - 1
drivers/char/Kconfig

@@ -328,7 +328,8 @@ config JS_RTC
 
 config GEN_RTC
 	tristate "Generic /dev/rtc emulation"
-	depends on RTC!=y && !IA64 && !ARM && !M32R && !MIPS && !SPARC && !FRV && !S390 && !SUPERH && !AVR32 && !BLACKFIN && !UML
+	depends on RTC!=y
+	depends on ALPHA || M68K || MN10300 || PARISC || PPC || X86
 	---help---
 	  If you say Y here and create a character special file /dev/rtc with
 	  major number 10 and minor number 135 using mknod ("man mknod"), you

+ 1 - 1
drivers/char/mem.c

@@ -695,7 +695,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
 		offset += file->f_pos;
 	case SEEK_SET:
 		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
-		if (IS_ERR_VALUE((unsigned long long)offset)) {
+		if ((unsigned long long)offset >= -MAX_ERRNO) {
 			ret = -EOVERFLOW;
 			break;
 		}

+ 6 - 6
drivers/char/nvram.c

@@ -496,12 +496,12 @@ static void pc_set_checksum(void)
 
 #ifdef CONFIG_PROC_FS
 
-static char *floppy_types[] = {
+static const char * const floppy_types[] = {
 	"none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M",
 	"3.5'' 2.88M", "3.5'' 2.88M"
 };
 
-static char *gfx_types[] = {
+static const char * const gfx_types[] = {
 	"EGA, VGA, ... (with BIOS)",
 	"CGA (40 cols)",
 	"CGA (80 cols)",
@@ -602,7 +602,7 @@ static void atari_set_checksum(void)
 
 static struct {
 	unsigned char val;
-	char *name;
+	const char *name;
 } boot_prefs[] = {
 	{ 0x80, "TOS" },
 	{ 0x40, "ASV" },
@@ -611,7 +611,7 @@ static struct {
 	{ 0x00, "unspecified" }
 };
 
-static char *languages[] = {
+static const char * const languages[] = {
 	"English (US)",
 	"German",
 	"French",
@@ -623,7 +623,7 @@ static char *languages[] = {
 	"Swiss (German)"
 };
 
-static char *dateformat[] = {
+static const char * const dateformat[] = {
 	"MM%cDD%cYY",
 	"DD%cMM%cYY",
 	"YY%cMM%cDD",
@@ -634,7 +634,7 @@ static char *dateformat[] = {
 	"7 (undefined)"
 };
 
-static char *colors[] = {
+static const char * const colors[] = {
 	"2", "4", "16", "256", "65536", "??", "??", "??"
 };
 

+ 2 - 3
drivers/char/nwbutton.c

@@ -129,10 +129,9 @@ static void button_consume_callbacks (int bpcount)
 
 static void button_sequence_finished (unsigned long parameters)
 {
-#ifdef CONFIG_NWBUTTON_REBOOT		/* Reboot using button is enabled */
-	if (button_press_count == reboot_count)
+	if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) &&
+	    button_press_count == reboot_count)
 		kill_cad_pid(SIGINT, 1);	/* Ask init to reboot us */
-#endif /* CONFIG_NWBUTTON_REBOOT */
 	button_consume_callbacks (button_press_count);
 	bcount = sprintf (button_output_buffer, "%d\n", button_press_count);
 	button_press_count = 0;		/* Reset the button press counter */

+ 226 - 171
drivers/char/ppdev.c

@@ -69,12 +69,13 @@
 #include <linux/ppdev.h>
 #include <linux/mutex.h>
 #include <linux/uaccess.h>
+#include <linux/compat.h>
 
 #define PP_VERSION "ppdev: user-space parallel port driver"
 #define CHRDEV "ppdev"
 
 struct pp_struct {
-	struct pardevice * pdev;
+	struct pardevice *pdev;
 	wait_queue_head_t irq_wait;
 	atomic_t irqc;
 	unsigned int flags;
@@ -98,18 +99,26 @@ struct pp_struct {
 #define ROUND_UP(x,y) (((x)+(y)-1)/(y))
 
 static DEFINE_MUTEX(pp_do_mutex);
-static inline void pp_enable_irq (struct pp_struct *pp)
+
+/* define fixed sized ioctl cmd for y2038 migration */
+#define PPGETTIME32	_IOR(PP_IOCTL, 0x95, s32[2])
+#define PPSETTIME32	_IOW(PP_IOCTL, 0x96, s32[2])
+#define PPGETTIME64	_IOR(PP_IOCTL, 0x95, s64[2])
+#define PPSETTIME64	_IOW(PP_IOCTL, 0x96, s64[2])
+
+static inline void pp_enable_irq(struct pp_struct *pp)
 {
 	struct parport *port = pp->pdev->port;
-	port->ops->enable_irq (port);
+
+	port->ops->enable_irq(port);
 }
 
-static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
-			loff_t * ppos)
+static ssize_t pp_read(struct file *file, char __user *buf, size_t count,
+		       loff_t *ppos)
 {
 	unsigned int minor = iminor(file_inode(file));
 	struct pp_struct *pp = file->private_data;
-	char * kbuffer;
+	char *kbuffer;
 	ssize_t bytes_read = 0;
 	struct parport *pport;
 	int mode;
@@ -125,16 +134,15 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
 		return 0;
 
 	kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
-	if (!kbuffer) {
+	if (!kbuffer)
 		return -ENOMEM;
-	}
 	pport = pp->pdev->port;
 	mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
 
-	parport_set_timeout (pp->pdev,
-			     (file->f_flags & O_NONBLOCK) ?
-			     PARPORT_INACTIVITY_O_NONBLOCK :
-			     pp->default_inactivity);
+	parport_set_timeout(pp->pdev,
+			    (file->f_flags & O_NONBLOCK) ?
+			    PARPORT_INACTIVITY_O_NONBLOCK :
+			    pp->default_inactivity);
 
 	while (bytes_read == 0) {
 		ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE);
@@ -144,20 +152,17 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
 			int flags = 0;
 			size_t (*fn)(struct parport *, void *, size_t, int);
 
-			if (pp->flags & PP_W91284PIC) {
+			if (pp->flags & PP_W91284PIC)
 				flags |= PARPORT_W91284PIC;
-			}
-			if (pp->flags & PP_FASTREAD) {
+			if (pp->flags & PP_FASTREAD)
 				flags |= PARPORT_EPP_FAST;
-			}
-			if (pport->ieee1284.mode & IEEE1284_ADDR) {
+			if (pport->ieee1284.mode & IEEE1284_ADDR)
 				fn = pport->ops->epp_read_addr;
-			} else {
+			else
 				fn = pport->ops->epp_read_data;
-			}
 			bytes_read = (*fn)(pport, kbuffer, need, flags);
 		} else {
-			bytes_read = parport_read (pport, kbuffer, need);
+			bytes_read = parport_read(pport, kbuffer, need);
 		}
 
 		if (bytes_read != 0)
@@ -168,7 +173,7 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
 			break;
 		}
 
-		if (signal_pending (current)) {
+		if (signal_pending(current)) {
 			bytes_read = -ERESTARTSYS;
 			break;
 		}
@@ -176,22 +181,22 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
 		cond_resched();
 	}
 
-	parport_set_timeout (pp->pdev, pp->default_inactivity);
+	parport_set_timeout(pp->pdev, pp->default_inactivity);
 
-	if (bytes_read > 0 && copy_to_user (buf, kbuffer, bytes_read))
+	if (bytes_read > 0 && copy_to_user(buf, kbuffer, bytes_read))
 		bytes_read = -EFAULT;
 
-	kfree (kbuffer);
-	pp_enable_irq (pp);
+	kfree(kbuffer);
+	pp_enable_irq(pp);
 	return bytes_read;
 }
 
-static ssize_t pp_write (struct file * file, const char __user * buf,
-			 size_t count, loff_t * ppos)
+static ssize_t pp_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
 {
 	unsigned int minor = iminor(file_inode(file));
 	struct pp_struct *pp = file->private_data;
-	char * kbuffer;
+	char *kbuffer;
 	ssize_t bytes_written = 0;
 	ssize_t wrote;
 	int mode;
@@ -204,21 +209,21 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
 	}
 
 	kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
-	if (!kbuffer) {
+	if (!kbuffer)
 		return -ENOMEM;
-	}
+
 	pport = pp->pdev->port;
 	mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
 
-	parport_set_timeout (pp->pdev,
-			     (file->f_flags & O_NONBLOCK) ?
-			     PARPORT_INACTIVITY_O_NONBLOCK :
-			     pp->default_inactivity);
+	parport_set_timeout(pp->pdev,
+			    (file->f_flags & O_NONBLOCK) ?
+			    PARPORT_INACTIVITY_O_NONBLOCK :
+			    pp->default_inactivity);
 
 	while (bytes_written < count) {
 		ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE);
 
-		if (copy_from_user (kbuffer, buf + bytes_written, n)) {
+		if (copy_from_user(kbuffer, buf + bytes_written, n)) {
 			bytes_written = -EFAULT;
 			break;
 		}
@@ -226,20 +231,19 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
 		if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) {
 			/* do a fast EPP write */
 			if (pport->ieee1284.mode & IEEE1284_ADDR) {
-				wrote = pport->ops->epp_write_addr (pport,
+				wrote = pport->ops->epp_write_addr(pport,
 					kbuffer, n, PARPORT_EPP_FAST);
 			} else {
-				wrote = pport->ops->epp_write_data (pport,
+				wrote = pport->ops->epp_write_data(pport,
 					kbuffer, n, PARPORT_EPP_FAST);
 			}
 		} else {
-			wrote = parport_write (pp->pdev->port, kbuffer, n);
+			wrote = parport_write(pp->pdev->port, kbuffer, n);
 		}
 
 		if (wrote <= 0) {
-			if (!bytes_written) {
+			if (!bytes_written)
 				bytes_written = wrote;
-			}
 			break;
 		}
 
@@ -251,67 +255,69 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
 			break;
 		}
 
-		if (signal_pending (current))
+		if (signal_pending(current))
 			break;
 
 		cond_resched();
 	}
 
-	parport_set_timeout (pp->pdev, pp->default_inactivity);
+	parport_set_timeout(pp->pdev, pp->default_inactivity);
 
-	kfree (kbuffer);
-	pp_enable_irq (pp);
+	kfree(kbuffer);
+	pp_enable_irq(pp);
 	return bytes_written;
 }
 
-static void pp_irq (void *private)
+static void pp_irq(void *private)
 {
 	struct pp_struct *pp = private;
 
 	if (pp->irqresponse) {
-		parport_write_control (pp->pdev->port, pp->irqctl);
+		parport_write_control(pp->pdev->port, pp->irqctl);
 		pp->irqresponse = 0;
 	}
 
-	atomic_inc (&pp->irqc);
-	wake_up_interruptible (&pp->irq_wait);
+	atomic_inc(&pp->irqc);
+	wake_up_interruptible(&pp->irq_wait);
 }
 
-static int register_device (int minor, struct pp_struct *pp)
+static int register_device(int minor, struct pp_struct *pp)
 {
 	struct parport *port;
-	struct pardevice * pdev = NULL;
+	struct pardevice *pdev = NULL;
 	char *name;
-	int fl;
+	struct pardev_cb ppdev_cb;
 
 	name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
 	if (name == NULL)
 		return -ENOMEM;
 
-	port = parport_find_number (minor);
+	port = parport_find_number(minor);
 	if (!port) {
-		printk (KERN_WARNING "%s: no associated port!\n", name);
-		kfree (name);
+		printk(KERN_WARNING "%s: no associated port!\n", name);
+		kfree(name);
 		return -ENXIO;
 	}
 
-	fl = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
-	pdev = parport_register_device (port, name, NULL,
-					NULL, pp_irq, fl, pp);
-	parport_put_port (port);
+	memset(&ppdev_cb, 0, sizeof(ppdev_cb));
+	ppdev_cb.irq_func = pp_irq;
+	ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
+	ppdev_cb.private = pp;
+	pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
+	parport_put_port(port);
 
 	if (!pdev) {
-		printk (KERN_WARNING "%s: failed to register device!\n", name);
-		kfree (name);
+		printk(KERN_WARNING "%s: failed to register device!\n", name);
+		kfree(name);
 		return -ENXIO;
 	}
 
 	pp->pdev = pdev;
-	pr_debug("%s: registered pardevice\n", name);
+	dev_dbg(&pdev->dev, "registered pardevice\n");
 	return 0;
 }
 
-static enum ieee1284_phase init_phase (int mode)
+static enum ieee1284_phase init_phase(int mode)
 {
 	switch (mode & ~(IEEE1284_DEVICEID
 			 | IEEE1284_ADDR)) {
@@ -322,11 +328,27 @@ static enum ieee1284_phase init_phase (int mode)
 	return IEEE1284_PH_FWD_IDLE;
 }
 
+static int pp_set_timeout(struct pardevice *pdev, long tv_sec, int tv_usec)
+{
+	long to_jiffies;
+
+	if ((tv_sec < 0) || (tv_usec < 0))
+		return -EINVAL;
+
+	to_jiffies = usecs_to_jiffies(tv_usec);
+	to_jiffies += tv_sec * HZ;
+	if (to_jiffies <= 0)
+		return -EINVAL;
+
+	pdev->timeout = to_jiffies;
+	return 0;
+}
+
 static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	unsigned int minor = iminor(file_inode(file));
 	struct pp_struct *pp = file->private_data;
-	struct parport * port;
+	struct parport *port;
 	void __user *argp = (void __user *)arg;
 
 	/* First handle the cases that don't take arguments. */
@@ -337,19 +359,19 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		int ret;
 
 		if (pp->flags & PP_CLAIMED) {
-			pr_debug(CHRDEV "%x: you've already got it!\n", minor);
+			dev_dbg(&pp->pdev->dev, "you've already got it!\n");
 			return -EINVAL;
 		}
 
 		/* Deferred device registration. */
 		if (!pp->pdev) {
-			int err = register_device (minor, pp);
-			if (err) {
+			int err = register_device(minor, pp);
+
+			if (err)
 				return err;
-			}
 		}
 
-		ret = parport_claim_or_block (pp->pdev);
+		ret = parport_claim_or_block(pp->pdev);
 		if (ret < 0)
 			return ret;
 
@@ -357,7 +379,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 		/* For interrupt-reporting to work, we need to be
 		 * informed of each interrupt. */
-		pp_enable_irq (pp);
+		pp_enable_irq(pp);
 
 		/* We may need to fix up the state machine. */
 		info = &pp->pdev->port->ieee1284;
@@ -365,15 +387,15 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		pp->saved_state.phase = info->phase;
 		info->mode = pp->state.mode;
 		info->phase = pp->state.phase;
-		pp->default_inactivity = parport_set_timeout (pp->pdev, 0);
-		parport_set_timeout (pp->pdev, pp->default_inactivity);
+		pp->default_inactivity = parport_set_timeout(pp->pdev, 0);
+		parport_set_timeout(pp->pdev, pp->default_inactivity);
 
 		return 0;
 	    }
 	case PPEXCL:
 		if (pp->pdev) {
-			pr_debug(CHRDEV "%x: too late for PPEXCL; "
-				"already registered\n", minor);
+			dev_dbg(&pp->pdev->dev,
+				"too late for PPEXCL; already registered\n");
 			if (pp->flags & PP_EXCL)
 				/* But it's not really an error. */
 				return 0;
@@ -388,11 +410,12 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case PPSETMODE:
 	    {
 		int mode;
-		if (copy_from_user (&mode, argp, sizeof (mode)))
+
+		if (copy_from_user(&mode, argp, sizeof(mode)))
 			return -EFAULT;
 		/* FIXME: validate mode */
 		pp->state.mode = mode;
-		pp->state.phase = init_phase (mode);
+		pp->state.phase = init_phase(mode);
 
 		if (pp->flags & PP_CLAIMED) {
 			pp->pdev->port->ieee1284.mode = mode;
@@ -405,28 +428,27 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	    {
 		int mode;
 
-		if (pp->flags & PP_CLAIMED) {
+		if (pp->flags & PP_CLAIMED)
 			mode = pp->pdev->port->ieee1284.mode;
-		} else {
+		else
 			mode = pp->state.mode;
-		}
-		if (copy_to_user (argp, &mode, sizeof (mode))) {
+
+		if (copy_to_user(argp, &mode, sizeof(mode)))
 			return -EFAULT;
-		}
 		return 0;
 	    }
 	case PPSETPHASE:
 	    {
 		int phase;
-		if (copy_from_user (&phase, argp, sizeof (phase))) {
+
+		if (copy_from_user(&phase, argp, sizeof(phase)))
 			return -EFAULT;
-		}
+
 		/* FIXME: validate phase */
 		pp->state.phase = phase;
 
-		if (pp->flags & PP_CLAIMED) {
+		if (pp->flags & PP_CLAIMED)
 			pp->pdev->port->ieee1284.phase = phase;
-		}
 
 		return 0;
 	    }
@@ -434,38 +456,34 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	    {
 		int phase;
 
-		if (pp->flags & PP_CLAIMED) {
+		if (pp->flags & PP_CLAIMED)
 			phase = pp->pdev->port->ieee1284.phase;
-		} else {
+		else
 			phase = pp->state.phase;
-		}
-		if (copy_to_user (argp, &phase, sizeof (phase))) {
+		if (copy_to_user(argp, &phase, sizeof(phase)))
 			return -EFAULT;
-		}
 		return 0;
 	    }
 	case PPGETMODES:
 	    {
 		unsigned int modes;
 
-		port = parport_find_number (minor);
+		port = parport_find_number(minor);
 		if (!port)
 			return -ENODEV;
 
 		modes = port->modes;
 		parport_put_port(port);
-		if (copy_to_user (argp, &modes, sizeof (modes))) {
+		if (copy_to_user(argp, &modes, sizeof(modes)))
 			return -EFAULT;
-		}
 		return 0;
 	    }
 	case PPSETFLAGS:
 	    {
 		int uflags;
 
-		if (copy_from_user (&uflags, argp, sizeof (uflags))) {
+		if (copy_from_user(&uflags, argp, sizeof(uflags)))
 			return -EFAULT;
-		}
 		pp->flags &= ~PP_FLAGMASK;
 		pp->flags |= (uflags & PP_FLAGMASK);
 		return 0;
@@ -475,9 +493,8 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		int uflags;
 
 		uflags = pp->flags & PP_FLAGMASK;
-		if (copy_to_user (argp, &uflags, sizeof (uflags))) {
+		if (copy_to_user(argp, &uflags, sizeof(uflags)))
 			return -EFAULT;
-		}
 		return 0;
 	    }
 	}	/* end switch() */
@@ -495,27 +512,28 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		unsigned char reg;
 		unsigned char mask;
 		int mode;
+		s32 time32[2];
+		s64 time64[2];
+		struct timespec64 ts;
 		int ret;
-		struct timeval par_timeout;
-		long to_jiffies;
 
 	case PPRSTATUS:
-		reg = parport_read_status (port);
-		if (copy_to_user (argp, &reg, sizeof (reg)))
+		reg = parport_read_status(port);
+		if (copy_to_user(argp, &reg, sizeof(reg)))
 			return -EFAULT;
 		return 0;
 	case PPRDATA:
-		reg = parport_read_data (port);
-		if (copy_to_user (argp, &reg, sizeof (reg)))
+		reg = parport_read_data(port);
+		if (copy_to_user(argp, &reg, sizeof(reg)))
 			return -EFAULT;
 		return 0;
 	case PPRCONTROL:
-		reg = parport_read_control (port);
-		if (copy_to_user (argp, &reg, sizeof (reg)))
+		reg = parport_read_control(port);
+		if (copy_to_user(argp, &reg, sizeof(reg)))
 			return -EFAULT;
 		return 0;
 	case PPYIELD:
-		parport_yield_blocking (pp->pdev);
+		parport_yield_blocking(pp->pdev);
 		return 0;
 
 	case PPRELEASE:
@@ -525,45 +543,45 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		pp->state.phase = info->phase;
 		info->mode = pp->saved_state.mode;
 		info->phase = pp->saved_state.phase;
-		parport_release (pp->pdev);
+		parport_release(pp->pdev);
 		pp->flags &= ~PP_CLAIMED;
 		return 0;
 
 	case PPWCONTROL:
-		if (copy_from_user (&reg, argp, sizeof (reg)))
+		if (copy_from_user(&reg, argp, sizeof(reg)))
 			return -EFAULT;
-		parport_write_control (port, reg);
+		parport_write_control(port, reg);
 		return 0;
 
 	case PPWDATA:
-		if (copy_from_user (&reg, argp, sizeof (reg)))
+		if (copy_from_user(&reg, argp, sizeof(reg)))
 			return -EFAULT;
-		parport_write_data (port, reg);
+		parport_write_data(port, reg);
 		return 0;
 
 	case PPFCONTROL:
-		if (copy_from_user (&mask, argp,
-				    sizeof (mask)))
+		if (copy_from_user(&mask, argp,
+				   sizeof(mask)))
 			return -EFAULT;
-		if (copy_from_user (&reg, 1 + (unsigned char __user *) arg,
-				    sizeof (reg)))
+		if (copy_from_user(&reg, 1 + (unsigned char __user *) arg,
+				   sizeof(reg)))
 			return -EFAULT;
-		parport_frob_control (port, mask, reg);
+		parport_frob_control(port, mask, reg);
 		return 0;
 
 	case PPDATADIR:
-		if (copy_from_user (&mode, argp, sizeof (mode)))
+		if (copy_from_user(&mode, argp, sizeof(mode)))
 			return -EFAULT;
 		if (mode)
-			port->ops->data_reverse (port);
+			port->ops->data_reverse(port);
 		else
-			port->ops->data_forward (port);
+			port->ops->data_forward(port);
 		return 0;
 
 	case PPNEGOT:
-		if (copy_from_user (&mode, argp, sizeof (mode)))
+		if (copy_from_user(&mode, argp, sizeof(mode)))
 			return -EFAULT;
-		switch ((ret = parport_negotiate (port, mode))) {
+		switch ((ret = parport_negotiate(port, mode))) {
 		case 0: break;
 		case -1: /* handshake failed, peripheral not IEEE 1284 */
 			ret = -EIO;
@@ -572,11 +590,11 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 			ret = -ENXIO;
 			break;
 		}
-		pp_enable_irq (pp);
+		pp_enable_irq(pp);
 		return ret;
 
 	case PPWCTLONIRQ:
-		if (copy_from_user (&reg, argp, sizeof (reg)))
+		if (copy_from_user(&reg, argp, sizeof(reg)))
 			return -EFAULT;
 
 		/* Remember what to set the control lines to, for next
@@ -586,39 +604,50 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		return 0;
 
 	case PPCLRIRQ:
-		ret = atomic_read (&pp->irqc);
-		if (copy_to_user (argp, &ret, sizeof (ret)))
+		ret = atomic_read(&pp->irqc);
+		if (copy_to_user(argp, &ret, sizeof(ret)))
 			return -EFAULT;
-		atomic_sub (ret, &pp->irqc);
+		atomic_sub(ret, &pp->irqc);
 		return 0;
 
-	case PPSETTIME:
-		if (copy_from_user (&par_timeout, argp, sizeof(struct timeval))) {
+	case PPSETTIME32:
+		if (copy_from_user(time32, argp, sizeof(time32)))
 			return -EFAULT;
-		}
-		/* Convert to jiffies, place in pp->pdev->timeout */
-		if ((par_timeout.tv_sec < 0) || (par_timeout.tv_usec < 0)) {
-			return -EINVAL;
-		}
-		to_jiffies = ROUND_UP(par_timeout.tv_usec, 1000000/HZ);
-		to_jiffies += par_timeout.tv_sec * (long)HZ;
-		if (to_jiffies <= 0) {
+
+		return pp_set_timeout(pp->pdev, time32[0], time32[1]);
+
+	case PPSETTIME64:
+		if (copy_from_user(time64, argp, sizeof(time64)))
+			return -EFAULT;
+
+		return pp_set_timeout(pp->pdev, time64[0], time64[1]);
+
+	case PPGETTIME32:
+		jiffies_to_timespec64(pp->pdev->timeout, &ts);
+		time32[0] = ts.tv_sec;
+		time32[1] = ts.tv_nsec / NSEC_PER_USEC;
+		if ((time32[0] < 0) || (time32[1] < 0))
 			return -EINVAL;
-		}
-		pp->pdev->timeout = to_jiffies;
+
+		if (copy_to_user(argp, time32, sizeof(time32)))
+			return -EFAULT;
+
 		return 0;
 
-	case PPGETTIME:
-		to_jiffies = pp->pdev->timeout;
-		memset(&par_timeout, 0, sizeof(par_timeout));
-		par_timeout.tv_sec = to_jiffies / HZ;
-		par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ);
-		if (copy_to_user (argp, &par_timeout, sizeof(struct timeval)))
+	case PPGETTIME64:
+		jiffies_to_timespec64(pp->pdev->timeout, &ts);
+		time64[0] = ts.tv_sec;
+		time64[1] = ts.tv_nsec / NSEC_PER_USEC;
+		if ((time64[0] < 0) || (time64[1] < 0))
+			return -EINVAL;
+
+		if (copy_to_user(argp, time64, sizeof(time64)))
 			return -EFAULT;
+
 		return 0;
 
 	default:
-		pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd);
+		dev_dbg(&pp->pdev->dev, "What? (cmd=0x%x)\n", cmd);
 		return -EINVAL;
 	}
 
@@ -629,13 +658,22 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	long ret;
+
 	mutex_lock(&pp_do_mutex);
 	ret = pp_do_ioctl(file, cmd, arg);
 	mutex_unlock(&pp_do_mutex);
 	return ret;
 }
 
-static int pp_open (struct inode * inode, struct file * file)
+#ifdef CONFIG_COMPAT
+static long pp_compat_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	return pp_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static int pp_open(struct inode *inode, struct file *file)
 {
 	unsigned int minor = iminor(inode);
 	struct pp_struct *pp;
@@ -643,16 +681,16 @@ static int pp_open (struct inode * inode, struct file * file)
 	if (minor >= PARPORT_MAX)
 		return -ENXIO;
 
-	pp = kmalloc (sizeof (struct pp_struct), GFP_KERNEL);
+	pp = kmalloc(sizeof(struct pp_struct), GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 
 	pp->state.mode = IEEE1284_MODE_COMPAT;
-	pp->state.phase = init_phase (pp->state.mode);
+	pp->state.phase = init_phase(pp->state.mode);
 	pp->flags = 0;
 	pp->irqresponse = 0;
-	atomic_set (&pp->irqc, 0);
-	init_waitqueue_head (&pp->irq_wait);
+	atomic_set(&pp->irqc, 0);
+	init_waitqueue_head(&pp->irq_wait);
 
 	/* Defer the actual device registration until the first claim.
 	 * That way, we know whether or not the driver wants to have
@@ -664,7 +702,7 @@ static int pp_open (struct inode * inode, struct file * file)
 	return 0;
 }
 
-static int pp_release (struct inode * inode, struct file * file)
+static int pp_release(struct inode *inode, struct file *file)
 {
 	unsigned int minor = iminor(inode);
 	struct pp_struct *pp = file->private_data;
@@ -673,10 +711,10 @@ static int pp_release (struct inode * inode, struct file * file)
 	compat_negot = 0;
 	if (!(pp->flags & PP_CLAIMED) && pp->pdev &&
 	    (pp->state.mode != IEEE1284_MODE_COMPAT)) {
-	    	struct ieee1284_info *info;
+		struct ieee1284_info *info;
 
 		/* parport released, but not in compatibility mode */
-		parport_claim_or_block (pp->pdev);
+		parport_claim_or_block(pp->pdev);
 		pp->flags |= PP_CLAIMED;
 		info = &pp->pdev->port->ieee1284;
 		pp->saved_state.mode = info->mode;
@@ -689,9 +727,9 @@ static int pp_release (struct inode * inode, struct file * file)
 		compat_negot = 2;
 	}
 	if (compat_negot) {
-		parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT);
-		pr_debug(CHRDEV "%x: negotiated back to compatibility "
-			"mode because user-space forgot\n", minor);
+		parport_negotiate(pp->pdev->port, IEEE1284_MODE_COMPAT);
+		dev_dbg(&pp->pdev->dev,
+			"negotiated back to compatibility mode because user-space forgot\n");
 	}
 
 	if (pp->flags & PP_CLAIMED) {
@@ -702,7 +740,7 @@ static int pp_release (struct inode * inode, struct file * file)
 		pp->state.phase = info->phase;
 		info->mode = pp->saved_state.mode;
 		info->phase = pp->saved_state.phase;
-		parport_release (pp->pdev);
+		parport_release(pp->pdev);
 		if (compat_negot != 1) {
 			pr_debug(CHRDEV "%x: released pardevice "
 				"because user-space forgot\n", minor);
@@ -711,25 +749,26 @@ static int pp_release (struct inode * inode, struct file * file)
 
 	if (pp->pdev) {
 		const char *name = pp->pdev->name;
-		parport_unregister_device (pp->pdev);
-		kfree (name);
+
+		parport_unregister_device(pp->pdev);
+		kfree(name);
 		pp->pdev = NULL;
 		pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
 	}
 
-	kfree (pp);
+	kfree(pp);
 
 	return 0;
 }
 
 /* No kernel lock held - fine */
-static unsigned int pp_poll (struct file * file, poll_table * wait)
+static unsigned int pp_poll(struct file *file, poll_table *wait)
 {
 	struct pp_struct *pp = file->private_data;
 	unsigned int mask = 0;
 
-	poll_wait (file, &pp->irq_wait, wait);
-	if (atomic_read (&pp->irqc))
+	poll_wait(file, &pp->irq_wait, wait);
+	if (atomic_read(&pp->irqc))
 		mask |= POLLIN | POLLRDNORM;
 
 	return mask;
@@ -744,6 +783,9 @@ static const struct file_operations pp_fops = {
 	.write		= pp_write,
 	.poll		= pp_poll,
 	.unlocked_ioctl	= pp_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = pp_compat_ioctl,
+#endif
 	.open		= pp_open,
 	.release	= pp_release,
 };
@@ -759,19 +801,32 @@ static void pp_detach(struct parport *port)
 	device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number));
 }
 
+static int pp_probe(struct pardevice *par_dev)
+{
+	struct device_driver *drv = par_dev->dev.driver;
+	int len = strlen(drv->name);
+
+	if (strncmp(par_dev->name, drv->name, len))
+		return -ENODEV;
+
+	return 0;
+}
+
 static struct parport_driver pp_driver = {
 	.name		= CHRDEV,
-	.attach		= pp_attach,
+	.probe		= pp_probe,
+	.match_port	= pp_attach,
 	.detach		= pp_detach,
+	.devmodel	= true,
 };
 
-static int __init ppdev_init (void)
+static int __init ppdev_init(void)
 {
 	int err = 0;
 
-	if (register_chrdev (PP_MAJOR, CHRDEV, &pp_fops)) {
-		printk (KERN_WARNING CHRDEV ": unable to get major %d\n",
-			PP_MAJOR);
+	if (register_chrdev(PP_MAJOR, CHRDEV, &pp_fops)) {
+		printk(KERN_WARNING CHRDEV ": unable to get major %d\n",
+		       PP_MAJOR);
 		return -EIO;
 	}
 	ppdev_class = class_create(THIS_MODULE, CHRDEV);
@@ -781,11 +836,11 @@ static int __init ppdev_init (void)
 	}
 	err = parport_register_driver(&pp_driver);
 	if (err < 0) {
-		printk (KERN_WARNING CHRDEV ": unable to register with parport\n");
+		printk(KERN_WARNING CHRDEV ": unable to register with parport\n");
 		goto out_class;
 	}
 
-	printk (KERN_INFO PP_VERSION "\n");
+	printk(KERN_INFO PP_VERSION "\n");
 	goto out;
 
 out_class:
@@ -796,12 +851,12 @@ out:
 	return err;
 }
 
-static void __exit ppdev_cleanup (void)
+static void __exit ppdev_cleanup(void)
 {
 	/* Clean up all parport stuff */
 	parport_unregister_driver(&pp_driver);
 	class_destroy(ppdev_class);
-	unregister_chrdev (PP_MAJOR, CHRDEV);
+	unregister_chrdev(PP_MAJOR, CHRDEV);
 }
 
 module_init(ppdev_init);
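
For context on the timeout rework above: user space keeps calling the existing PPSETTIME/PPGETTIME ioctls with a struct timeval, and the kernel matches them against the fixed-size PPxxTIME32/PPxxTIME64 definitions by the argument size encoded in the command. A minimal sketch of the user-space side, assuming the port shows up as /dev/parport0:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <linux/ppdev.h>

int main(void)
{
	struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };
	int fd = open("/dev/parport0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/parport0");
		return 1;
	}
	if (ioctl(fd, PPCLAIM) == 0) {		/* timeouts need a claimed port */
		ioctl(fd, PPSETTIME, &tv);	/* 2 s inactivity timeout */
		ioctl(fd, PPGETTIME, &tv);	/* read it back */
		printf("timeout: %ld.%06ld s\n",
		       (long)tv.tv_sec, (long)tv.tv_usec);
		ioctl(fd, PPRELEASE);
	}
	close(fd);
	return 0;
}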

+ 1 - 3
drivers/char/raw.c

@@ -334,10 +334,8 @@ static int __init raw_init(void)
 
 	cdev_init(&raw_cdev, &raw_fops);
 	ret = cdev_add(&raw_cdev, dev, max_raw_minors);
-	if (ret) {
+	if (ret)
 		goto error_region;
-	}
-
 	raw_class = class_create(THIS_MODULE, "raw");
 	if (IS_ERR(raw_class)) {
 		printk(KERN_ERR "Error creating raw class.\n");

+ 3 - 1
drivers/char/xillybus/xillybus_core.c

@@ -509,7 +509,7 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
 			channel->log2_element_size = ((format > 2) ?
 						      2 : format);
 
-			bytebufsize = channel->rd_buf_size = bufsize *
+			bytebufsize = bufsize *
 				(1 << channel->log2_element_size);
 
 			buffers = devm_kcalloc(dev, bufnum,
@@ -523,6 +523,7 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
 
 		if (!is_writebuf) {
 			channel->num_rd_buffers = bufnum;
+			channel->rd_buf_size = bytebufsize;
 			channel->rd_allow_partial = allowpartial;
 			channel->rd_synchronous = synchronous;
 			channel->rd_exclusive_open = exclusive_open;
@@ -533,6 +534,7 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
 						   bufnum, bytebufsize);
 		} else if (channelnum > 0) {
 			channel->num_wr_buffers = bufnum;
+			channel->wr_buf_size = bytebufsize;
 
 			channel->seekable = seekable;
 			channel->wr_supports_nonempty = supports_nonempty;

+ 2 - 2
drivers/extcon/extcon-arizona.c

@@ -185,7 +185,7 @@ static void arizona_extcon_hp_clamp(struct arizona_extcon_info *info,
 		break;
 	};
 
-	mutex_lock(&arizona->dapm->card->dapm_mutex);
+	snd_soc_dapm_mutex_lock(arizona->dapm);
 
 	arizona->hpdet_clamp = clamp;
 
@@ -227,7 +227,7 @@ static void arizona_extcon_hp_clamp(struct arizona_extcon_info *info,
 				 ret);
 	}
 
-	mutex_unlock(&arizona->dapm->card->dapm_mutex);
+	snd_soc_dapm_mutex_unlock(arizona->dapm);
 }
 
 static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)

+ 1 - 1
drivers/extcon/extcon-gpio.c

@@ -126,7 +126,7 @@ static int gpio_extcon_probe(struct platform_device *pdev)
 	INIT_DELAYED_WORK(&data->work, gpio_extcon_work);
 
 	/*
-	 * Request the interrput of gpio to detect whether external connector
+	 * Request the interrupt of gpio to detect whether external connector
 	 * is attached or detached.
 	 */
 	ret = devm_request_any_context_irq(&pdev->dev, data->irq,

+ 3 - 0
drivers/extcon/extcon-max14577.c

@@ -150,6 +150,7 @@ enum max14577_muic_acc_type {
 
 static const unsigned int max14577_extcon_cable[] = {
 	EXTCON_USB,
+	EXTCON_CHG_USB_SDP,
 	EXTCON_CHG_USB_DCP,
 	EXTCON_CHG_USB_FAST,
 	EXTCON_CHG_USB_SLOW,
@@ -454,6 +455,8 @@ static int max14577_muic_chg_handler(struct max14577_muic_info *info)
 			return ret;
 
 		extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
+		extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+					attached);
 		break;
 	case MAX14577_CHARGER_TYPE_DEDICATED_CHG:
 		extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,

+ 11 - 1
drivers/extcon/extcon-max77693.c

@@ -204,6 +204,7 @@ enum max77693_muic_acc_type {
 static const unsigned int max77693_extcon_cable[] = {
 	EXTCON_USB,
 	EXTCON_USB_HOST,
+	EXTCON_CHG_USB_SDP,
 	EXTCON_CHG_USB_DCP,
 	EXTCON_CHG_USB_FAST,
 	EXTCON_CHG_USB_SLOW,
@@ -512,8 +513,11 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
 		break;
 	case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD:		/* Dock-Audio */
 		dock_id = EXTCON_DOCK;
-		if (!attached)
+		if (!attached) {
 			extcon_set_cable_state_(info->edev, EXTCON_USB, false);
+			extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+						false);
+		}
 		break;
 	default:
 		dev_err(info->dev, "failed to detect %s dock device\n",
@@ -601,6 +605,8 @@ static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
 		if (ret < 0)
 			return ret;
 		extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
+		extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+					attached);
 		break;
 	case MAX77693_MUIC_GND_MHL:
 	case MAX77693_MUIC_GND_MHL_VB:
@@ -830,6 +836,8 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
 			 */
 			extcon_set_cable_state_(info->edev, EXTCON_USB,
 						attached);
+			extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+						attached);
 
 			if (!cable_attached)
 				extcon_set_cable_state_(info->edev, EXTCON_DOCK,
@@ -899,6 +907,8 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
 
 			extcon_set_cable_state_(info->edev, EXTCON_USB,
 						attached);
+			extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+						attached);
 			break;
 		case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
 			/* Only TA cable */

+ 4 - 1
drivers/extcon/extcon-max77843.c

@@ -122,6 +122,7 @@ enum max77843_muic_charger_type {
 static const unsigned int max77843_extcon_cable[] = {
 	EXTCON_USB,
 	EXTCON_USB_HOST,
+	EXTCON_CHG_USB_SDP,
 	EXTCON_CHG_USB_DCP,
 	EXTCON_CHG_USB_CDP,
 	EXTCON_CHG_USB_FAST,
@@ -486,6 +487,8 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
 			return ret;
 
 		extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
+		extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+					attached);
 		break;
 	case MAX77843_MUIC_CHG_DOWNSTREAM:
 		ret = max77843_muic_set_path(info,
@@ -803,7 +806,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
 	/* Clear IRQ bits before request IRQs */
 	ret = regmap_bulk_read(max77843->regmap_muic,
 			MAX77843_MUIC_REG_INT1, info->status,
-			MAX77843_MUIC_IRQ_NUM);
+			MAX77843_MUIC_STATUS_NUM);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
 		goto err_muic_irq;

+ 3 - 0
drivers/extcon/extcon-max8997.c

@@ -148,6 +148,7 @@ struct max8997_muic_info {
 static const unsigned int max8997_extcon_cable[] = {
 	EXTCON_USB,
 	EXTCON_USB_HOST,
+	EXTCON_CHG_USB_SDP,
 	EXTCON_CHG_USB_DCP,
 	EXTCON_CHG_USB_FAST,
 	EXTCON_CHG_USB_SLOW,
@@ -334,6 +335,8 @@ static int max8997_muic_handle_usb(struct max8997_muic_info *info,
 		break;
 	case MAX8997_USB_DEVICE:
 		extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
+		extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+					attached);
 		break;
 	default:
 		dev_err(info->dev, "failed to detect %s usb cable\n",

+ 52 - 2
drivers/extcon/extcon-palmas.c

@@ -216,11 +216,23 @@ static int palmas_usb_probe(struct platform_device *pdev)
 		return PTR_ERR(palmas_usb->id_gpiod);
 	}
 
+	palmas_usb->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus",
+							GPIOD_IN);
+	if (IS_ERR(palmas_usb->vbus_gpiod)) {
+		dev_err(&pdev->dev, "failed to get vbus gpio\n");
+		return PTR_ERR(palmas_usb->vbus_gpiod);
+	}
+
 	if (palmas_usb->enable_id_detection && palmas_usb->id_gpiod) {
 		palmas_usb->enable_id_detection = false;
 		palmas_usb->enable_gpio_id_detection = true;
 	}
 
+	if (palmas_usb->enable_vbus_detection && palmas_usb->vbus_gpiod) {
+		palmas_usb->enable_vbus_detection = false;
+		palmas_usb->enable_gpio_vbus_detection = true;
+	}
+
 	if (palmas_usb->enable_gpio_id_detection) {
 		u32 debounce;
 
@@ -266,7 +278,7 @@ static int palmas_usb_probe(struct platform_device *pdev)
 				palmas_usb->id_irq,
 				NULL, palmas_id_irq_handler,
 				IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
-				IRQF_ONESHOT | IRQF_EARLY_RESUME,
+				IRQF_ONESHOT,
 				"palmas_usb_id", palmas_usb);
 		if (status < 0) {
 			dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
@@ -304,13 +316,47 @@ static int palmas_usb_probe(struct platform_device *pdev)
 				palmas_usb->vbus_irq, NULL,
 				palmas_vbus_irq_handler,
 				IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
-				IRQF_ONESHOT | IRQF_EARLY_RESUME,
+				IRQF_ONESHOT,
 				"palmas_usb_vbus", palmas_usb);
 		if (status < 0) {
 			dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
 					palmas_usb->vbus_irq, status);
 			return status;
 		}
+	} else if (palmas_usb->enable_gpio_vbus_detection) {
+		/* remux GPIO_1 as VBUSDET */
+		status = palmas_update_bits(palmas,
+			PALMAS_PU_PD_OD_BASE,
+			PALMAS_PRIMARY_SECONDARY_PAD1,
+			PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_MASK,
+			(1 << PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_SHIFT));
+		if (status < 0) {
+			dev_err(&pdev->dev, "can't remux GPIO1\n");
+			return status;
+		}
+
+		palmas_usb->vbus_otg_irq = regmap_irq_get_virq(palmas->irq_data,
+						       PALMAS_VBUS_OTG_IRQ);
+		palmas_usb->gpio_vbus_irq = gpiod_to_irq(palmas_usb->vbus_gpiod);
+		if (palmas_usb->gpio_vbus_irq < 0) {
+			dev_err(&pdev->dev, "failed to get vbus irq\n");
+			return palmas_usb->gpio_vbus_irq;
+		}
+		status = devm_request_threaded_irq(&pdev->dev,
+						palmas_usb->gpio_vbus_irq,
+						NULL,
+						palmas_vbus_irq_handler,
+						IRQF_TRIGGER_FALLING |
+						IRQF_TRIGGER_RISING |
+						IRQF_ONESHOT |
+						IRQF_EARLY_RESUME,
+						"palmas_usb_vbus",
+						palmas_usb);
+		if (status < 0) {
+			dev_err(&pdev->dev,
+				"failed to request handler for vbus irq\n");
+			return status;
+		}
 	}
 
 	palmas_enable_irq(palmas_usb);
@@ -337,6 +383,8 @@ static int palmas_usb_suspend(struct device *dev)
 	if (device_may_wakeup(dev)) {
 		if (palmas_usb->enable_vbus_detection)
 			enable_irq_wake(palmas_usb->vbus_irq);
+		if (palmas_usb->enable_gpio_vbus_detection)
+			enable_irq_wake(palmas_usb->gpio_vbus_irq);
 		if (palmas_usb->enable_id_detection)
 			enable_irq_wake(palmas_usb->id_irq);
 		if (palmas_usb->enable_gpio_id_detection)
@@ -352,6 +400,8 @@ static int palmas_usb_resume(struct device *dev)
 	if (device_may_wakeup(dev)) {
 		if (palmas_usb->enable_vbus_detection)
 			disable_irq_wake(palmas_usb->vbus_irq);
+		if (palmas_usb->enable_gpio_vbus_detection)
+			disable_irq_wake(palmas_usb->gpio_vbus_irq);
 		if (palmas_usb->enable_id_detection)
 			disable_irq_wake(palmas_usb->id_irq);
 		if (palmas_usb->enable_gpio_id_detection)
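
The palmas probe above prefers a firmware-described "vbus" GPIO over the built-in comparator for VBUS detection when both are available. A rough model of that selection, with an illustrative struct rather than the driver's real private data:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the driver's private data, not the real struct. */
struct usb_detect {
	bool have_vbus_gpiod;            /* optional "vbus" GPIO was found */
	bool enable_vbus_detection;      /* comparator-based VBUS interrupt */
	bool enable_gpio_vbus_detection; /* GPIO-based VBUS interrupt */
};

/* Prefer the GPIO when the board describes one, as the probe hunk above does. */
static void pick_vbus_source(struct usb_detect *d)
{
	if (d->enable_vbus_detection && d->have_vbus_gpiod) {
		d->enable_vbus_detection = false;
		d->enable_gpio_vbus_detection = true;
	}
}

int main(void)
{
	struct usb_detect d = {
		.have_vbus_gpiod = true,
		.enable_vbus_detection = true,
	};

	pick_vbus_source(&d);
	printf("comparator=%d gpio=%d\n",
	       d.enable_vbus_detection, d.enable_gpio_vbus_detection);
	return 0;
}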

+ 6 - 2
drivers/extcon/extcon-rt8973a.c

@@ -93,6 +93,7 @@ static struct reg_data rt8973a_reg_data[] = {
 static const unsigned int rt8973a_extcon_cable[] = {
 	EXTCON_USB,
 	EXTCON_USB_HOST,
+	EXTCON_CHG_USB_SDP,
 	EXTCON_CHG_USB_DCP,
 	EXTCON_JIG,
 	EXTCON_NONE,
@@ -398,6 +399,9 @@ static int rt8973a_muic_cable_handler(struct rt8973a_muic_info *info,
 
 	/* Change the state of external accessory */
 	extcon_set_cable_state_(info->edev, id, attached);
+	if (id == EXTCON_USB)
+		extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+					attached);
 
 	return 0;
 }
@@ -663,7 +667,7 @@ MODULE_DEVICE_TABLE(of, rt8973a_dt_match);
 #ifdef CONFIG_PM_SLEEP
 static int rt8973a_muic_suspend(struct device *dev)
 {
-	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *i2c = to_i2c_client(dev);
 	struct rt8973a_muic_info *info = i2c_get_clientdata(i2c);
 
 	enable_irq_wake(info->irq);
@@ -673,7 +677,7 @@ static int rt8973a_muic_suspend(struct device *dev)
 
 static int rt8973a_muic_resume(struct device *dev)
 {
-	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *i2c = to_i2c_client(dev);
 	struct rt8973a_muic_info *info = i2c_get_clientdata(i2c);
 
 	disable_irq_wake(info->irq);

+ 6 - 2
drivers/extcon/extcon-sm5502.c

@@ -95,6 +95,7 @@ static struct reg_data sm5502_reg_data[] = {
 static const unsigned int sm5502_extcon_cable[] = {
 	EXTCON_USB,
 	EXTCON_USB_HOST,
+	EXTCON_CHG_USB_SDP,
 	EXTCON_CHG_USB_DCP,
 	EXTCON_NONE,
 };
@@ -411,6 +412,9 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
 
 	/* Change the state of external accessory */
 	extcon_set_cable_state_(info->edev, id, attached);
+	if (id == EXTCON_USB)
+		extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+					attached);
 
 	return 0;
 }
@@ -655,7 +659,7 @@ MODULE_DEVICE_TABLE(of, sm5502_dt_match);
 #ifdef CONFIG_PM_SLEEP
 static int sm5502_muic_suspend(struct device *dev)
 {
-	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *i2c = to_i2c_client(dev);
 	struct sm5502_muic_info *info = i2c_get_clientdata(i2c);
 
 	enable_irq_wake(info->irq);
@@ -665,7 +669,7 @@ static int sm5502_muic_suspend(struct device *dev)
 
 static int sm5502_muic_resume(struct device *dev)
 {
-	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *i2c = to_i2c_client(dev);
 	struct sm5502_muic_info *info = i2c_get_clientdata(i2c);
 
 	disable_irq_wake(info->irq);
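
Both the rt8973a and sm5502 hunks swap an open-coded container_of() for the equivalent to_i2c_client() helper. A standalone sketch of what such a helper does, using simplified struct layouts rather than the real i2c core types:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): recover the outer struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { const char *name; };
struct i2c_client { int addr; struct device dev; };

/* A to_i2c_client()-style helper hides the container_of() boilerplate. */
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)

int main(void)
{
	struct i2c_client client = { .addr = 0x25, .dev = { .name = "muic" } };
	struct device *dev = &client.dev;	/* what a PM callback receives */
	struct i2c_client *i2c = to_i2c_client(dev);

	printf("%s at 0x%02x\n", i2c->dev.name, i2c->addr);
	return 0;
}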

+ 31 - 5
drivers/hv/channel.c

@@ -219,6 +219,21 @@ error0:
 }
 EXPORT_SYMBOL_GPL(vmbus_open);
 
+/* Used for Hyper-V Socket: a guest client's connect() to the host */
+int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
+				  const uuid_le *shv_host_servie_id)
+{
+	struct vmbus_channel_tl_connect_request conn_msg;
+
+	memset(&conn_msg, 0, sizeof(conn_msg));
+	conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
+	conn_msg.guest_endpoint_id = *shv_guest_servie_id;
+	conn_msg.host_service_id = *shv_host_servie_id;
+
+	return vmbus_post_msg(&conn_msg, sizeof(conn_msg));
+}
+EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
+
 /*
  * create_gpadl_header - Creates a gpadl for the specified buffer
  */
@@ -624,6 +639,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	u64 aligned_data = 0;
 	int ret;
 	bool signal = false;
+	bool lock = channel->acquire_ring_lock;
 	int num_vecs = ((bufferlen != 0) ? 3 : 1);
 
 
@@ -643,7 +659,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
 	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
-				  &signal);
+				  &signal, lock);
 
 	/*
 	 * Signalling the host is conditional on many factors:
@@ -659,6 +675,9 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	 * If we cannot write to the ring-buffer; signal the host
 	 * even if we may not have written anything. This is a rare
 	 * enough condition that it should not matter.
+	 * NOTE: in this case, the hvsock channel is an exception, because
+	 * it looks like the host side's hvsock implementation has a
+	 * throttling mechanism which can hurt the performance otherwise.
 	 */
 
 	if (channel->signal_policy)
@@ -666,7 +685,8 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	else
 		kick_q = true;
 
-	if (((ret == 0) && kick_q && signal) || (ret))
+	if (((ret == 0) && kick_q && signal) ||
+	    (ret && !is_hvsock_channel(channel)))
 		vmbus_setevent(channel);
 
 	return ret;
@@ -719,6 +739,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
 	bool signal = false;
+	bool lock = channel->acquire_ring_lock;
 
 	if (pagecount > MAX_PAGE_BUFFER_COUNT)
 		return -EINVAL;
@@ -755,7 +776,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+				  &signal, lock);
 
 	/*
 	 * Signalling the host is conditional on many factors:
@@ -818,6 +840,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
 	bool signal = false;
+	bool lock = channel->acquire_ring_lock;
 
 	packetlen = desc_size + bufferlen;
 	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
@@ -837,7 +860,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+				  &signal, lock);
 
 	if (ret == 0 && signal)
 		vmbus_setevent(channel);
@@ -862,6 +886,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
 	bool signal = false;
+	bool lock = channel->acquire_ring_lock;
 	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
 					 multi_pagebuffer->len);
 
@@ -900,7 +925,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+				  &signal, lock);
 
 	if (ret == 0 && signal)
 		vmbus_setevent(channel);
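
The NOTE in vmbus_sendpacket_ctl() about hvsock reduces to one condition on the write result. A compact model of that decision with assumed names (ring_was_empty stands in for the "signal" hint returned by the ring-buffer write, and -11 is just a placeholder error value):

#include <assert.h>
#include <stdbool.h>

/*
 * Decide whether to kick the host after a write: on success, only if the
 * policy asks for it and the ring transitioned from empty; on failure,
 * always -- except for hvsock channels, whose host side is assumed (per the
 * comment above) to throttle and suffer from the extra wakeups.
 */
static bool should_signal_host(int write_ret, bool kick_q, bool ring_was_empty,
			       bool is_hvsock)
{
	if (write_ret == 0)
		return kick_q && ring_was_empty;
	return !is_hvsock;
}

int main(void)
{
	assert(should_signal_host(0, true, true, false));	/* normal fast path */
	assert(!should_signal_host(0, true, false, false));	/* ring not empty */
	assert(should_signal_host(-11, false, false, false));	/* full ring: kick anyway */
	assert(!should_signal_host(-11, false, false, true));	/* hvsock exception */
	return 0;
}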

+ 213 - 49
drivers/hv/channel_mgmt.c

@@ -28,12 +28,127 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/completion.h>
+#include <linux/delay.h>
 #include <linux/hyperv.h>
 
 #include "hyperv_vmbus.h"
 
-static void init_vp_index(struct vmbus_channel *channel,
-			  const uuid_le *type_guid);
+static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);
+
+static const struct vmbus_device vmbus_devs[] = {
+	/* IDE */
+	{ .dev_type = HV_IDE,
+	  HV_IDE_GUID,
+	  .perf_device = true,
+	},
+
+	/* SCSI */
+	{ .dev_type = HV_SCSI,
+	  HV_SCSI_GUID,
+	  .perf_device = true,
+	},
+
+	/* Fibre Channel */
+	{ .dev_type = HV_FC,
+	  HV_SYNTHFC_GUID,
+	  .perf_device = true,
+	},
+
+	/* Synthetic NIC */
+	{ .dev_type = HV_NIC,
+	  HV_NIC_GUID,
+	  .perf_device = true,
+	},
+
+	/* Network Direct */
+	{ .dev_type = HV_ND,
+	  HV_ND_GUID,
+	  .perf_device = true,
+	},
+
+	/* PCIE */
+	{ .dev_type = HV_PCIE,
+	  HV_PCIE_GUID,
+	  .perf_device = true,
+	},
+
+	/* Synthetic Frame Buffer */
+	{ .dev_type = HV_FB,
+	  HV_SYNTHVID_GUID,
+	  .perf_device = false,
+	},
+
+	/* Synthetic Keyboard */
+	{ .dev_type = HV_KBD,
+	  HV_KBD_GUID,
+	  .perf_device = false,
+	},
+
+	/* Synthetic MOUSE */
+	{ .dev_type = HV_MOUSE,
+	  HV_MOUSE_GUID,
+	  .perf_device = false,
+	},
+
+	/* KVP */
+	{ .dev_type = HV_KVP,
+	  HV_KVP_GUID,
+	  .perf_device = false,
+	},
+
+	/* Time Synch */
+	{ .dev_type = HV_TS,
+	  HV_TS_GUID,
+	  .perf_device = false,
+	},
+
+	/* Heartbeat */
+	{ .dev_type = HV_HB,
+	  HV_HEART_BEAT_GUID,
+	  .perf_device = false,
+	},
+
+	/* Shutdown */
+	{ .dev_type = HV_SHUTDOWN,
+	  HV_SHUTDOWN_GUID,
+	  .perf_device = false,
+	},
+
+	/* File copy */
+	{ .dev_type = HV_FCOPY,
+	  HV_FCOPY_GUID,
+	  .perf_device = false,
+	},
+
+	/* Backup */
+	{ .dev_type = HV_BACKUP,
+	  HV_VSS_GUID,
+	  .perf_device = false,
+	},
+
+	/* Dynamic Memory */
+	{ .dev_type = HV_DM,
+	  HV_DM_GUID,
+	  .perf_device = false,
+	},
+
+	/* Unknown GUID */
+	{ .dev_type = HV_UNKOWN,
+	  .perf_device = false,
+	},
+};
+
+static u16 hv_get_dev_type(const uuid_le *guid)
+{
+	u16 i;
+
+	for (i = HV_IDE; i < HV_UNKOWN; i++) {
+		if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
+			return i;
+	}
+	pr_info("Unknown GUID: %pUl\n", guid);
+	return i;
+}
 
 /**
  * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
@@ -144,6 +259,7 @@ static struct vmbus_channel *alloc_channel(void)
 		return NULL;
 
 	channel->id = atomic_inc_return(&chan_num);
+	channel->acquire_ring_lock = true;
 	spin_lock_init(&channel->inbound_lock);
 	spin_lock_init(&channel->lock);
 
@@ -195,6 +311,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
 	vmbus_release_relid(relid);
 
 	BUG_ON(!channel->rescind);
+	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
 
 	if (channel->target_cpu != get_cpu()) {
 		put_cpu();
@@ -206,9 +323,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
 	}
 
 	if (channel->primary_channel == NULL) {
-		mutex_lock(&vmbus_connection.channel_mutex);
 		list_del(&channel->listentry);
-		mutex_unlock(&vmbus_connection.channel_mutex);
 
 		primary_channel = channel;
 	} else {
@@ -251,6 +366,8 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	struct vmbus_channel *channel;
 	bool fnew = true;
 	unsigned long flags;
+	u16 dev_type;
+	int ret;
 
 	/* Make sure this is a new offer */
 	mutex_lock(&vmbus_connection.channel_mutex);
@@ -288,7 +405,9 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 			goto err_free_chan;
 	}
 
-	init_vp_index(newchannel, &newchannel->offermsg.offer.if_type);
+	dev_type = hv_get_dev_type(&newchannel->offermsg.offer.if_type);
+
+	init_vp_index(newchannel, dev_type);
 
 	if (newchannel->target_cpu != get_cpu()) {
 		put_cpu();
@@ -325,12 +444,17 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	if (!newchannel->device_obj)
 		goto err_deq_chan;
 
+	newchannel->device_obj->device_id = dev_type;
 	/*
 	 * Add the new device to the bus. This will kick off device-driver
 	 * binding which eventually invokes the device driver's AddDevice()
 	 * method.
 	 */
-	if (vmbus_device_register(newchannel->device_obj) != 0) {
+	mutex_lock(&vmbus_connection.channel_mutex);
+	ret = vmbus_device_register(newchannel->device_obj);
+	mutex_unlock(&vmbus_connection.channel_mutex);
+
+	if (ret != 0) {
 		pr_err("unable to add child device object (relid %d)\n",
 			newchannel->offermsg.child_relid);
 		kfree(newchannel->device_obj);
@@ -358,37 +482,6 @@ err_free_chan:
 	free_channel(newchannel);
 }
 
-enum {
-	IDE = 0,
-	SCSI,
-	FC,
-	NIC,
-	ND_NIC,
-	PCIE,
-	MAX_PERF_CHN,
-};
-
-/*
- * This is an array of device_ids (device types) that are performance critical.
- * We attempt to distribute the interrupt load for these devices across
- * all available CPUs.
- */
-static const struct hv_vmbus_device_id hp_devs[] = {
-	/* IDE */
-	{ HV_IDE_GUID, },
-	/* Storage - SCSI */
-	{ HV_SCSI_GUID, },
-	/* Storage - FC */
-	{ HV_SYNTHFC_GUID, },
-	/* Network */
-	{ HV_NIC_GUID, },
-	/* NetworkDirect Guest RDMA */
-	{ HV_ND_GUID, },
-	/* PCI Express Pass Through */
-	{ HV_PCIE_GUID, },
-};
-
-
 /*
  * We use this state to statically distribute the channel interrupt load.
  */
@@ -405,22 +498,15 @@ static int next_numa_node_id;
  * For pre-win8 hosts or non-performance critical channels we assign the
  * first CPU in the first NUMA node.
  */
-static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
+static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
 {
 	u32 cur_cpu;
-	int i;
-	bool perf_chn = false;
+	bool perf_chn = vmbus_devs[dev_type].perf_device;
 	struct vmbus_channel *primary = channel->primary_channel;
 	int next_node;
 	struct cpumask available_mask;
 	struct cpumask *alloced_mask;
 
-	for (i = IDE; i < MAX_PERF_CHN; i++) {
-		if (!uuid_le_cmp(*type_guid, hp_devs[i].guid)) {
-			perf_chn = true;
-			break;
-		}
-	}
 	if ((vmbus_proto_version == VERSION_WS2008) ||
 	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
 		/*
@@ -469,6 +555,17 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
 		    cpumask_of_node(primary->numa_node));
 
 	cur_cpu = -1;
+
+	/*
+	 * Normally the Hyper-V host doesn't create more subchannels than
+	 * there are VCPUs on the node, but it is possible when not all
+	 * present VCPUs on the node are initialized by the guest. Clear
+	 * alloced_cpus_in_node to start over.
+	 */
+	if (cpumask_equal(&primary->alloced_cpus_in_node,
+			  cpumask_of_node(primary->numa_node)))
+		cpumask_clear(&primary->alloced_cpus_in_node);
+
 	while (true) {
 		cur_cpu = cpumask_next(cur_cpu, &available_mask);
 		if (cur_cpu >= nr_cpu_ids) {
@@ -498,6 +595,32 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
 	channel->target_vp = hv_context.vp_index[cur_cpu];
 }
 
+static void vmbus_wait_for_unload(void)
+{
+	int cpu = smp_processor_id();
+	void *page_addr = hv_context.synic_message_page[cpu];
+	struct hv_message *msg = (struct hv_message *)page_addr +
+				  VMBUS_MESSAGE_SINT;
+	struct vmbus_channel_message_header *hdr;
+	bool unloaded = false;
+
+	while (1) {
+		if (READ_ONCE(msg->header.message_type) == HVMSG_NONE) {
+			mdelay(10);
+			continue;
+		}
+
+		hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+		if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
+			unloaded = true;
+
+		vmbus_signal_eom(msg);
+
+		if (unloaded)
+			break;
+	}
+}
+
 /*
  * vmbus_unload_response - Handler for the unload response.
  */
@@ -510,7 +633,7 @@ static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
 	complete(&vmbus_connection.unload_event);
 }
 
-void vmbus_initiate_unload(void)
+void vmbus_initiate_unload(bool crash)
 {
 	struct vmbus_channel_message_header hdr;
 
@@ -523,7 +646,14 @@ void vmbus_initiate_unload(void)
 	hdr.msgtype = CHANNELMSG_UNLOAD;
 	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
 
-	wait_for_completion(&vmbus_connection.unload_event);
+	/*
+	 * vmbus_initiate_unload() is also called on crash, and the crash can
+	 * happen in an interrupt context, where scheduling is impossible.
+	 */
+	if (!crash)
+		wait_for_completion(&vmbus_connection.unload_event);
+	else
+		vmbus_wait_for_unload();
 }
 
 /*
@@ -592,6 +722,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 	struct device *dev;
 
 	rescind = (struct vmbus_channel_rescind_offer *)hdr;
+
+	mutex_lock(&vmbus_connection.channel_mutex);
 	channel = relid2channel(rescind->child_relid);
 
 	if (channel == NULL) {
@@ -600,7 +732,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 		 * vmbus_process_offer(), we have already invoked
 		 * vmbus_release_relid() on error.
 		 */
-		return;
+		goto out;
 	}
 
 	spin_lock_irqsave(&channel->lock, flags);
@@ -608,6 +740,10 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 	spin_unlock_irqrestore(&channel->lock, flags);
 
 	if (channel->device_obj) {
+		if (channel->chn_rescind_callback) {
+			channel->chn_rescind_callback(channel);
+			goto out;
+		}
 		/*
 		 * We will have to unregister this device from the
 		 * driver core.
@@ -621,7 +757,24 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 		hv_process_channel_removal(channel,
 			channel->offermsg.child_relid);
 	}
+
+out:
+	mutex_unlock(&vmbus_connection.channel_mutex);
+}
+
+void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
+{
+	mutex_lock(&vmbus_connection.channel_mutex);
+
+	BUG_ON(!is_hvsock_channel(channel));
+
+	channel->rescind = true;
+	vmbus_device_unregister(channel->device_obj);
+
+	mutex_unlock(&vmbus_connection.channel_mutex);
 }
+EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
+
 
 /*
  * vmbus_onoffers_delivered -
@@ -825,6 +978,10 @@ struct vmbus_channel_message_table_entry
 	{CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response},
 	{CHANNELMSG_UNLOAD,			0, NULL},
 	{CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response},
+	{CHANNELMSG_18,				0, NULL},
+	{CHANNELMSG_19,				0, NULL},
+	{CHANNELMSG_20,				0, NULL},
+	{CHANNELMSG_TL_CONNECT_REQUEST,		0, NULL},
 };
 
 /*
@@ -973,3 +1130,10 @@ bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
+
+void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
+		void (*chn_rescind_cb)(struct vmbus_channel *))
+{
+	channel->chn_rescind_callback = chn_rescind_cb;
+}
+EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);
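
The vmbus_devs[] table and hv_get_dev_type() above replace a GUID match loop with a table that doubles as a device-type index, ending in an "unknown" sentinel. A small standalone model of that lookup shape (the GUID bytes here are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A 16-byte GUID compared bytewise, like uuid_le_cmp() in the hunk above. */
typedef struct { uint8_t b[16]; } guid_t;

enum dev_type { DEV_IDE, DEV_SCSI, DEV_NIC, DEV_UNKNOWN };

struct dev_entry {
	enum dev_type	type;
	guid_t		guid;
	bool		perf_device;
};

/* Table indexed by device type; the last slot is the "unknown" sentinel. */
static const struct dev_entry devs[] = {
	[DEV_IDE]     = { DEV_IDE,     { { 0x32, 0x26 } }, true  }, /* made-up bytes */
	[DEV_SCSI]    = { DEV_SCSI,    { { 0xd9, 0x63 } }, true  }, /* made-up bytes */
	[DEV_NIC]     = { DEV_NIC,     { { 0x63, 0x51 } }, true  }, /* made-up bytes */
	[DEV_UNKNOWN] = { DEV_UNKNOWN, { { 0 } },          false },
};

static enum dev_type lookup_dev_type(const guid_t *guid)
{
	int i;

	for (i = DEV_IDE; i < DEV_UNKNOWN; i++)
		if (!memcmp(guid->b, devs[i].guid.b, sizeof(guid->b)))
			return i;
	return DEV_UNKNOWN;	/* unknown GUIDs fall through to the sentinel */
}

int main(void)
{
	guid_t scsi = { { 0xd9, 0x63 } };	/* matches the made-up SCSI entry */

	printf("type=%d perf=%d\n", lookup_dev_type(&scsi),
	       devs[lookup_dev_type(&scsi)].perf_device);
	return 0;
}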

+ 14 - 6
drivers/hv/connection.c

@@ -88,8 +88,16 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
 	 * This has been the behavior pre-win8. This is not
 	 * perf issue and having all channel messages delivered on CPU 0
 	 * would be ok.
+	 * For post-win8 hosts, we support receiving channel messages on
+	 * all the CPUs. This is needed for kexec to work correctly where
+	 * the CPU attempting to connect may not be CPU 0.
 	 */
-	msg->target_vcpu = 0;
+	if (version >= VERSION_WIN8_1) {
+		msg->target_vcpu = hv_context.vp_index[get_cpu()];
+		put_cpu();
+	} else {
+		msg->target_vcpu = 0;
+	}
 
 	/*
 	 * Add to list before we send the request since we may
@@ -236,7 +244,7 @@ void vmbus_disconnect(void)
 	/*
 	 * First send the unload request to the host.
 	 */
-	vmbus_initiate_unload();
+	vmbus_initiate_unload(false);
 
 	if (vmbus_connection.work_queue) {
 		drain_workqueue(vmbus_connection.work_queue);
@@ -288,7 +296,8 @@ struct vmbus_channel *relid2channel(u32 relid)
 	struct list_head *cur, *tmp;
 	struct vmbus_channel *cur_sc;
 
-	mutex_lock(&vmbus_connection.channel_mutex);
+	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
+
 	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
 		if (channel->offermsg.child_relid == relid) {
 			found_channel = channel;
@@ -307,7 +316,6 @@ struct vmbus_channel *relid2channel(u32 relid)
 			}
 		}
 	}
-	mutex_unlock(&vmbus_connection.channel_mutex);
 
 	return found_channel;
 }
@@ -474,7 +482,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
 /*
  * vmbus_set_event - Send an event notification to the parent
  */
-int vmbus_set_event(struct vmbus_channel *channel)
+void vmbus_set_event(struct vmbus_channel *channel)
 {
 	u32 child_relid = channel->offermsg.child_relid;
 
@@ -485,5 +493,5 @@ int vmbus_set_event(struct vmbus_channel *channel)
 			(child_relid >> 5));
 	}
 
-	return hv_signal_event(channel->sig_event);
+	hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL);
 }
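
The vmbus_negotiate_version() hunk above picks the target VCPU based on the negotiated protocol version. A tiny sketch of that selection; the version constants are placeholders and only their ordering matters:

#include <stdio.h>

/* Placeholder protocol version numbers; only their relative order is used. */
enum { VERSION_WS2008 = 1, VERSION_WIN7, VERSION_WIN8, VERSION_WIN8_1 };

/*
 * Pre-WIN8_1 hosts expect channel messages on VCPU 0 only; newer hosts let
 * the guest target the CPU doing the connect, which is what makes kexec
 * from a non-boot CPU work in the hunk above.
 */
static int pick_target_vcpu(int version, int current_vp_index)
{
	return version >= VERSION_WIN8_1 ? current_vp_index : 0;
}

int main(void)
{
	printf("win7 host:   vcpu %d\n", pick_target_vcpu(VERSION_WIN7, 3));
	printf("win8.1 host: vcpu %d\n", pick_target_vcpu(VERSION_WIN8_1, 3));
	return 0;
}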

+ 18 - 18
drivers/hv/hv.c

@@ -204,6 +204,8 @@ int hv_init(void)
 	       sizeof(int) * NR_CPUS);
 	memset(hv_context.event_dpc, 0,
 	       sizeof(void *) * NR_CPUS);
+	memset(hv_context.msg_dpc, 0,
+	       sizeof(void *) * NR_CPUS);
 	memset(hv_context.clk_evt, 0,
 	       sizeof(void *) * NR_CPUS);
 
@@ -295,8 +297,14 @@ void hv_cleanup(void)
 	 * Cleanup the TSC page based CS.
 	 */
 	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
-		clocksource_change_rating(&hyperv_cs_tsc, 10);
-		clocksource_unregister(&hyperv_cs_tsc);
+		/*
+		 * Crash can happen in an interrupt context and unregistering
+		 * a clocksource is impossible and redundant in this case.
+		 */
+		if (!oops_in_progress) {
+			clocksource_change_rating(&hyperv_cs_tsc, 10);
+			clocksource_unregister(&hyperv_cs_tsc);
+		}
 
 		hypercall_msr.as_uint64 = 0;
 		wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
@@ -337,22 +345,6 @@ int hv_post_message(union hv_connection_id connection_id,
 	return status & 0xFFFF;
 }
 
-
-/*
- * hv_signal_event -
- * Signal an event on the specified connection using the hypervisor event IPC.
- *
- * This involves a hypercall.
- */
-int hv_signal_event(void *con_id)
-{
-	u64 status;
-
-	status = hv_do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL);
-
-	return status & 0xFFFF;
-}
-
 static int hv_ce_set_next_event(unsigned long delta,
 				struct clock_event_device *evt)
 {
@@ -425,6 +417,13 @@ int hv_synic_alloc(void)
 		}
 		tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
 
+		hv_context.msg_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
+		if (hv_context.msg_dpc[cpu] == NULL) {
+			pr_err("Unable to allocate message dpc\n");
+			goto err;
+		}
+		tasklet_init(hv_context.msg_dpc[cpu], vmbus_on_msg_dpc, cpu);
+
 		hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC);
 		if (hv_context.clk_evt[cpu] == NULL) {
 			pr_err("Unable to allocate clock event device\n");
@@ -466,6 +465,7 @@ err:
 static void hv_synic_free_cpu(int cpu)
 {
 	kfree(hv_context.event_dpc[cpu]);
+	kfree(hv_context.msg_dpc[cpu]);
 	kfree(hv_context.clk_evt[cpu]);
 	if (hv_context.synic_event_page[cpu])
 		free_page((unsigned long)hv_context.synic_event_page[cpu]);

+ 1 - 1
drivers/hv/hv_fcopy.c

@@ -251,7 +251,6 @@ void hv_fcopy_onchannelcallback(void *context)
 		 */
 
 		fcopy_transaction.recv_len = recvlen;
-		fcopy_transaction.recv_channel = channel;
 		fcopy_transaction.recv_req_id = requestid;
 		fcopy_transaction.fcopy_msg = fcopy_msg;
 
@@ -317,6 +316,7 @@ static void fcopy_on_reset(void)
 int hv_fcopy_init(struct hv_util_service *srv)
 {
 	recv_buffer = srv->recv_buffer;
+	fcopy_transaction.recv_channel = srv->channel;
 
 	/*
 	 * When this driver loads, the user level daemon that

+ 1 - 1
drivers/hv/hv_kvp.c

@@ -639,7 +639,6 @@ void hv_kvp_onchannelcallback(void *context)
 			 */
 
 			kvp_transaction.recv_len = recvlen;
-			kvp_transaction.recv_channel = channel;
 			kvp_transaction.recv_req_id = requestid;
 			kvp_transaction.kvp_msg = kvp_msg;
 
@@ -688,6 +687,7 @@ int
 hv_kvp_init(struct hv_util_service *srv)
 {
 	recv_buffer = srv->recv_buffer;
+	kvp_transaction.recv_channel = srv->channel;
 
 	/*
 	 * When this driver loads, the user level daemon that

+ 1 - 1
drivers/hv/hv_snapshot.c

@@ -263,7 +263,6 @@ void hv_vss_onchannelcallback(void *context)
 			 */
 
 			vss_transaction.recv_len = recvlen;
-			vss_transaction.recv_channel = channel;
 			vss_transaction.recv_req_id = requestid;
 			vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
 
@@ -337,6 +336,7 @@ hv_vss_init(struct hv_util_service *srv)
 		return -ENOTSUPP;
 	}
 	recv_buffer = srv->recv_buffer;
+	vss_transaction.recv_channel = srv->channel;
 
 	/*
 	 * When this driver loads, the user level daemon that

+ 1 - 0
drivers/hv/hv_util.c

@@ -322,6 +322,7 @@ static int util_probe(struct hv_device *dev,
 	srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
 	if (!srv->recv_buffer)
 		return -ENOMEM;
+	srv->channel = dev->channel;
 	if (srv->util_init) {
 		ret = srv->util_init(srv);
 		if (ret) {

+ 3 - 0
drivers/hv/hv_utils_transport.c

@@ -310,6 +310,9 @@ struct hvutil_transport *hvutil_transport_init(const char *name,
 	return hvt;
 
 err_free_hvt:
+	spin_lock(&hvt_list_lock);
+	list_del(&hvt->list);
+	spin_unlock(&hvt_list_lock);
 	kfree(hvt);
 	return NULL;
 }

+ 30 - 6
drivers/hv/hyperv_vmbus.h

@@ -443,10 +443,11 @@ struct hv_context {
 	u32 vp_index[NR_CPUS];
 	/*
 	 * Starting with win8, we can take channel interrupts on any CPU;
-	 * we will manage the tasklet that handles events on a per CPU
+	 * we will manage the tasklets that handle events and messages on a per CPU
 	 * basis.
 	 */
 	struct tasklet_struct *event_dpc[NR_CPUS];
+	struct tasklet_struct *msg_dpc[NR_CPUS];
 	/*
 	 * To optimize the mapping of relid to channel, maintain
 	 * per-cpu list of the channels based on their CPU affinity.
@@ -495,8 +496,6 @@ extern int hv_post_message(union hv_connection_id connection_id,
 			 enum hv_message_type message_type,
 			 void *payload, size_t payload_size);
 
-extern int hv_signal_event(void *con_id);
-
 extern int hv_synic_alloc(void);
 
 extern void hv_synic_free(void);
@@ -525,7 +524,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
 
 int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
 		    struct kvec *kv_list,
-		    u32 kv_count, bool *signal);
+		    u32 kv_count, bool *signal, bool lock);
 
 int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 		       void *buffer, u32 buflen, u32 *buffer_actual_len,
@@ -620,6 +619,30 @@ struct vmbus_channel_message_table_entry {
 extern struct vmbus_channel_message_table_entry
 	channel_message_table[CHANNELMSG_COUNT];
 
+/* Free the message slot and signal end-of-message if required */
+static inline void vmbus_signal_eom(struct hv_message *msg)
+{
+	msg->header.message_type = HVMSG_NONE;
+
+	/*
+	 * Make sure the write to MessageType (ie set to
+	 * HVMSG_NONE) happens before we read the
+	 * MessagePending and EOMing. Otherwise, the EOMing
+	 * will not deliver any more messages since there is
+	 * no empty slot
+	 */
+	mb();
+
+	if (msg->header.message_flags.msg_pending) {
+		/*
+		 * This will cause message queue rescan to
+		 * possibly deliver another msg from the
+		 * hypervisor
+		 */
+		wrmsrl(HV_X64_MSR_EOM, 0);
+	}
+}
+
 /* General vmbus interface */
 
 struct hv_device *vmbus_device_create(const uuid_le *type,
@@ -644,9 +667,10 @@ void vmbus_disconnect(void);
 
 int vmbus_post_msg(void *buffer, size_t buflen);
 
-int vmbus_set_event(struct vmbus_channel *channel);
+void vmbus_set_event(struct vmbus_channel *channel);
 
 void vmbus_on_event(unsigned long data);
+void vmbus_on_msg_dpc(unsigned long data);
 
 int hv_kvp_init(struct hv_util_service *);
 void hv_kvp_deinit(void);
@@ -659,7 +683,7 @@ void hv_vss_onchannelcallback(void *);
 int hv_fcopy_init(struct hv_util_service *);
 void hv_fcopy_deinit(void);
 void hv_fcopy_onchannelcallback(void *);
-void vmbus_initiate_unload(void);
+void vmbus_initiate_unload(bool crash);
 
 static inline void hv_poll_channel(struct vmbus_channel *channel,
 				   void (*cb)(void *))
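
The new vmbus_signal_eom() helper orders "free the message slot" before "check whether more messages are pending". A userspace sketch of that ordering using C11 atomics in place of the kernel's mb() and MSR write:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { MSG_NONE = 0, MSG_SOME = 1 };

struct msg_slot {
	_Atomic int  type;	/* models msg->header.message_type */
	_Atomic bool pending;	/* models message_flags.msg_pending */
};

/*
 * Free the slot first, then (and only then) look at the pending flag to
 * decide whether to ask for redelivery -- the fence plays the role of the
 * mb() in vmbus_signal_eom() above.
 */
static bool signal_eom(struct msg_slot *slot)
{
	atomic_store_explicit(&slot->type, MSG_NONE, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&slot->pending, memory_order_relaxed);
}

int main(void)
{
	struct msg_slot slot = { MSG_SOME, true };

	printf("ask hypervisor for more: %d\n", signal_eom(&slot));
	return 0;
}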

+ 13 - 18
drivers/hv/ring_buffer.c

@@ -314,7 +314,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 
 /* Write to the ring buffer. */
 int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
-		    struct kvec *kv_list, u32 kv_count, bool *signal)
+		    struct kvec *kv_list, u32 kv_count, bool *signal, bool lock)
 {
 	int i = 0;
 	u32 bytes_avail_towrite;
@@ -324,14 +324,15 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 	u32 next_write_location;
 	u32 old_write;
 	u64 prev_indices = 0;
-	unsigned long flags;
+	unsigned long flags = 0;
 
 	for (i = 0; i < kv_count; i++)
 		totalbytes_towrite += kv_list[i].iov_len;
 
 	totalbytes_towrite += sizeof(u64);
 
-	spin_lock_irqsave(&outring_info->ring_lock, flags);
+	if (lock)
+		spin_lock_irqsave(&outring_info->ring_lock, flags);
 
 	hv_get_ringbuffer_availbytes(outring_info,
 				&bytes_avail_toread,
@@ -343,7 +344,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 	 * is empty since the read index == write index.
 	 */
 	if (bytes_avail_towrite <= totalbytes_towrite) {
-		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+		if (lock)
+			spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 		return -EAGAIN;
 	}
 
@@ -374,7 +376,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 	hv_set_next_write_location(outring_info, next_write_location);
 
 
-	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+	if (lock)
+		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 
 	*signal = hv_need_to_signal(old_write, outring_info);
 	return 0;
@@ -388,7 +391,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 	u32 bytes_avail_toread;
 	u32 next_read_location = 0;
 	u64 prev_indices = 0;
-	unsigned long flags;
 	struct vmpacket_descriptor desc;
 	u32 offset;
 	u32 packetlen;
@@ -397,7 +399,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 	if (buflen <= 0)
 		return -EINVAL;
 
-	spin_lock_irqsave(&inring_info->ring_lock, flags);
 
 	*buffer_actual_len = 0;
 	*requestid = 0;
@@ -412,7 +413,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 		 * No error is set when there is even no header, drivers are
 		 * supposed to analyze buffer_actual_len.
 		 */
-		goto out_unlock;
+		return ret;
 	}
 
 	next_read_location = hv_get_next_read_location(inring_info);
@@ -425,15 +426,11 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 	*buffer_actual_len = packetlen;
 	*requestid = desc.trans_id;
 
-	if (bytes_avail_toread < packetlen + offset) {
-		ret = -EAGAIN;
-		goto out_unlock;
-	}
+	if (bytes_avail_toread < packetlen + offset)
+		return -EAGAIN;
 
-	if (packetlen > buflen) {
-		ret = -ENOBUFS;
-		goto out_unlock;
-	}
+	if (packetlen > buflen)
+		return -ENOBUFS;
 
 	next_read_location =
 		hv_get_next_readlocation_withoffset(inring_info, offset);
@@ -460,7 +457,5 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 
 	*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
 
-out_unlock:
-	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
 	return ret;
 }
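
hv_ringbuffer_write() now takes the ring spinlock only when the channel asks for it, and hv_ringbuffer_read() drops its lock entirely. A small pthread-based sketch of the conditional-lock shape (compile with -pthread); the ring itself is a toy, not the VMBus layout:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/*
 * The writer takes the lock only when asked to.  A channel with a single
 * writer (the hvsock case above) can pass lock == false and skip it.
 */
struct ring {
	pthread_mutex_t lock;
	char buf[64];
	size_t write_idx;
};

static int ring_write(struct ring *r, const char *data, size_t len, bool lock)
{
	if (lock)
		pthread_mutex_lock(&r->lock);

	if (len > sizeof(r->buf) - r->write_idx) {
		if (lock)
			pthread_mutex_unlock(&r->lock);
		return -1;	/* would overflow; caller may retry */
	}
	memcpy(r->buf + r->write_idx, data, len);
	r->write_idx += len;

	if (lock)
		pthread_mutex_unlock(&r->lock);
	return 0;
}

int main(void)
{
	struct ring r = { .write_idx = 0 };

	pthread_mutex_init(&r.lock, NULL);
	ring_write(&r, "hello", 5, true);	/* ordinary channel: take the lock */
	ring_write(&r, "world", 5, false);	/* single-writer channel: skip it */
	printf("%zu bytes written\n", r.write_idx);
	return 0;
}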

+ 51 - 66
drivers/hv/vmbus_drv.c

@@ -45,7 +45,6 @@
 
 static struct acpi_device  *hv_acpi_dev;
 
-static struct tasklet_struct msg_dpc;
 static struct completion probe_event;
 
 
@@ -477,6 +476,24 @@ static ssize_t channel_vp_mapping_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(channel_vp_mapping);
 
+static ssize_t vendor_show(struct device *dev,
+			   struct device_attribute *dev_attr,
+			   char *buf)
+{
+	struct hv_device *hv_dev = device_to_hv_device(dev);
+	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t device_show(struct device *dev,
+			   struct device_attribute *dev_attr,
+			   char *buf)
+{
+	struct hv_device *hv_dev = device_to_hv_device(dev);
+	return sprintf(buf, "0x%x\n", hv_dev->device_id);
+}
+static DEVICE_ATTR_RO(device);
+
 /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
 static struct attribute *vmbus_attrs[] = {
 	&dev_attr_id.attr,
@@ -502,6 +519,8 @@ static struct attribute *vmbus_attrs[] = {
 	&dev_attr_in_read_bytes_avail.attr,
 	&dev_attr_in_write_bytes_avail.attr,
 	&dev_attr_channel_vp_mapping.attr,
+	&dev_attr_vendor.attr,
+	&dev_attr_device.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(vmbus);
@@ -562,6 +581,10 @@ static int vmbus_match(struct device *device, struct device_driver *driver)
 	struct hv_driver *drv = drv_to_hv_drv(driver);
 	struct hv_device *hv_dev = device_to_hv_device(device);
 
+	/* The hv_sock driver handles all hv_sock offers. */
+	if (is_hvsock_channel(hv_dev->channel))
+		return drv->hvsock;
+
 	if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type))
 		return 1;
 
@@ -685,28 +708,10 @@ static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
 	if (dev->event_handler)
 		dev->event_handler(dev);
 
-	msg->header.message_type = HVMSG_NONE;
-
-	/*
-	 * Make sure the write to MessageType (ie set to
-	 * HVMSG_NONE) happens before we read the
-	 * MessagePending and EOMing. Otherwise, the EOMing
-	 * will not deliver any more messages since there is
-	 * no empty slot
-	 */
-	mb();
-
-	if (msg->header.message_flags.msg_pending) {
-		/*
-		 * This will cause message queue rescan to
-		 * possibly deliver another msg from the
-		 * hypervisor
-		 */
-		wrmsrl(HV_X64_MSR_EOM, 0);
-	}
+	vmbus_signal_eom(msg);
 }
 
-static void vmbus_on_msg_dpc(unsigned long data)
+void vmbus_on_msg_dpc(unsigned long data)
 {
 	int cpu = smp_processor_id();
 	void *page_addr = hv_context.synic_message_page[cpu];
@@ -716,52 +721,32 @@ static void vmbus_on_msg_dpc(unsigned long data)
 	struct vmbus_channel_message_table_entry *entry;
 	struct onmessage_work_context *ctx;
 
-	while (1) {
-		if (msg->header.message_type == HVMSG_NONE)
-			/* no msg */
-			break;
+	if (msg->header.message_type == HVMSG_NONE)
+		/* no msg */
+		return;
 
-		hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+	hdr = (struct vmbus_channel_message_header *)msg->u.payload;
 
-		if (hdr->msgtype >= CHANNELMSG_COUNT) {
-			WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
-			goto msg_handled;
-		}
+	if (hdr->msgtype >= CHANNELMSG_COUNT) {
+		WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
+		goto msg_handled;
+	}
 
-		entry = &channel_message_table[hdr->msgtype];
-		if (entry->handler_type	== VMHT_BLOCKING) {
-			ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
-			if (ctx == NULL)
-				continue;
+	entry = &channel_message_table[hdr->msgtype];
+	if (entry->handler_type	== VMHT_BLOCKING) {
+		ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+		if (ctx == NULL)
+			return;
 
-			INIT_WORK(&ctx->work, vmbus_onmessage_work);
-			memcpy(&ctx->msg, msg, sizeof(*msg));
+		INIT_WORK(&ctx->work, vmbus_onmessage_work);
+		memcpy(&ctx->msg, msg, sizeof(*msg));
 
-			queue_work(vmbus_connection.work_queue, &ctx->work);
-		} else
-			entry->message_handler(hdr);
+		queue_work(vmbus_connection.work_queue, &ctx->work);
+	} else
+		entry->message_handler(hdr);
 
 msg_handled:
-		msg->header.message_type = HVMSG_NONE;
-
-		/*
-		 * Make sure the write to MessageType (ie set to
-		 * HVMSG_NONE) happens before we read the
-		 * MessagePending and EOMing. Otherwise, the EOMing
-		 * will not deliver any more messages since there is
-		 * no empty slot
-		 */
-		mb();
-
-		if (msg->header.message_flags.msg_pending) {
-			/*
-			 * This will cause message queue rescan to
-			 * possibly deliver another msg from the
-			 * hypervisor
-			 */
-			wrmsrl(HV_X64_MSR_EOM, 0);
-		}
-	}
+	vmbus_signal_eom(msg);
 }
 
 static void vmbus_isr(void)
@@ -814,7 +799,7 @@ static void vmbus_isr(void)
 		if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
 			hv_process_timer_expiration(msg, cpu);
 		else
-			tasklet_schedule(&msg_dpc);
+			tasklet_schedule(hv_context.msg_dpc[cpu]);
 	}
 }
 
@@ -838,8 +823,6 @@ static int vmbus_bus_init(void)
 		return ret;
 	}
 
-	tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);
-
 	ret = bus_register(&hv_bus);
 	if (ret)
 		goto err_cleanup;
@@ -957,6 +940,7 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
 	memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
 	memcpy(&child_device_obj->dev_instance, instance,
 	       sizeof(uuid_le));
+	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */
 
 
 	return child_device_obj;
@@ -1268,7 +1252,7 @@ static void hv_kexec_handler(void)
 	int cpu;
 
 	hv_synic_clockevents_cleanup();
-	vmbus_initiate_unload();
+	vmbus_initiate_unload(false);
 	for_each_online_cpu(cpu)
 		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
 	hv_cleanup();
@@ -1276,7 +1260,7 @@ static void hv_kexec_handler(void)
 
 static void hv_crash_handler(struct pt_regs *regs)
 {
-	vmbus_initiate_unload();
+	vmbus_initiate_unload(true);
 	/*
 	 * In crash handler we can't schedule synic cleanup for all CPUs,
 	 * doing the cleanup for current CPU only. This should be sufficient
@@ -1334,7 +1318,8 @@ static void __exit vmbus_exit(void)
 	hv_synic_clockevents_cleanup();
 	vmbus_disconnect();
 	hv_remove_vmbus_irq();
-	tasklet_kill(&msg_dpc);
+	for_each_online_cpu(cpu)
+		tasklet_kill(hv_context.msg_dpc[cpu]);
 	vmbus_free_channels();
 	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
 		unregister_die_notifier(&hyperv_die_block);

+ 1 - 0
drivers/hwtracing/coresight/Kconfig

@@ -4,6 +4,7 @@
 menuconfig CORESIGHT
 	bool "CoreSight Tracing Support"
 	select ARM_AMBA
+	select PERF_EVENTS
 	help
 	  This framework provides a kernel interface for the CoreSight debug
 	  and trace drivers to register themselves with. It's intended to build

+ 3 - 1
drivers/hwtracing/coresight/Makefile

@@ -8,6 +8,8 @@ obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
 obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
 obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
 					   coresight-replicator.o
-obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o
+obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \
+					coresight-etm3x-sysfs.o \
+					coresight-etm-perf.o
 obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o
 obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o

+ 264 - 29
drivers/hwtracing/coresight/coresight-etb10.c

@@ -1,4 +1,6 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Embedded Trace Buffer driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,8 +12,8 @@
  * GNU General Public License for more details.
  */
 
+#include <asm/local.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
@@ -27,6 +29,11 @@
 #include <linux/coresight.h>
 #include <linux/amba/bus.h>
 #include <linux/clk.h>
+#include <linux/circ_buf.h>
+#include <linux/mm.h>
+#include <linux/perf_event.h>
+
+#include <asm/local.h>
 
 #include "coresight-priv.h"
 
@@ -63,6 +70,26 @@
 #define ETB_FFSR_BIT		1
 #define ETB_FRAME_SIZE_WORDS	4
 
+/**
+ * struct cs_buffers - keep track of a recording session's specifics
+ * @cur:	index of the current buffer
+ * @nr_pages:	max number of pages granted to us
+ * @offset:	offset within the current buffer
+ * @data_size:	how much we collected in this run
+ * @lost:	non-zero if we had a HW buffer wrap around
+ * @snapshot:	is this run in snapshot mode
+ * @data_pages:	a handle to the ring buffer
+ */
+struct cs_buffers {
+	unsigned int		cur;
+	unsigned int		nr_pages;
+	unsigned long		offset;
+	local_t			data_size;
+	local_t			lost;
+	bool			snapshot;
+	void			**data_pages;
+};
+
 /**
  * struct etb_drvdata - specifics associated to an ETB component
  * @base:	memory mapped base address for this component.
@@ -71,10 +98,10 @@
  * @csdev:	component vitals needed by the framework.
  * @miscdev:	specifics to handle "/dev/xyz.etb" entry.
  * @spinlock:	only one at a time pls.
- * @in_use:	synchronise user space access to etb buffer.
+ * @reading:	synchronise user space access to etb buffer.
+ * @mode:	how this ETB is currently being used (disabled, sysFS or perf).
  * @buf:	area of memory where ETB buffer content gets sent.
  * @buffer_depth: size of @buf.
- * @enable:	this ETB is being used.
  * @trigger_cntr: amount of words to store after a trigger.
  */
 struct etb_drvdata {
@@ -84,10 +111,10 @@ struct etb_drvdata {
 	struct coresight_device	*csdev;
 	struct miscdevice	miscdev;
 	spinlock_t		spinlock;
-	atomic_t		in_use;
+	local_t			reading;
+	local_t			mode;
 	u8			*buf;
 	u32			buffer_depth;
-	bool			enable;
 	u32			trigger_cntr;
 };
 
@@ -132,18 +159,31 @@ static void etb_enable_hw(struct etb_drvdata *drvdata)
 	CS_LOCK(drvdata->base);
 }
 
-static int etb_enable(struct coresight_device *csdev)
+static int etb_enable(struct coresight_device *csdev, u32 mode)
 {
-	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+	u32 val;
 	unsigned long flags;
+	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	pm_runtime_get_sync(drvdata->dev);
+	val = local_cmpxchg(&drvdata->mode,
+			    CS_MODE_DISABLED, mode);
+	/*
+	 * When accessing from Perf, a HW buffer can be handled
+	 * by a single trace entity.  In sysFS mode many tracers
+	 * can be logging to the same HW buffer.
+	 */
+	if (val == CS_MODE_PERF)
+		return -EBUSY;
+
+	/* Nothing to do, the tracer is already enabled. */
+	if (val == CS_MODE_SYSFS)
+		goto out;
 
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 	etb_enable_hw(drvdata);
-	drvdata->enable = true;
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
+out:
 	dev_info(drvdata->dev, "ETB enabled\n");
 	return 0;
 }
@@ -244,17 +284,225 @@ static void etb_disable(struct coresight_device *csdev)
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 	etb_disable_hw(drvdata);
 	etb_dump_hw(drvdata);
-	drvdata->enable = false;
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-	pm_runtime_put(drvdata->dev);
+	local_set(&drvdata->mode, CS_MODE_DISABLED);
 
 	dev_info(drvdata->dev, "ETB disabled\n");
 }
 
+static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
+			      void **pages, int nr_pages, bool overwrite)
+{
+	int node;
+	struct cs_buffers *buf;
+
+	if (cpu == -1)
+		cpu = smp_processor_id();
+	node = cpu_to_node(cpu);
+
+	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
+	if (!buf)
+		return NULL;
+
+	buf->snapshot = overwrite;
+	buf->nr_pages = nr_pages;
+	buf->data_pages = pages;
+
+	return buf;
+}
+
+static void etb_free_buffer(void *config)
+{
+	struct cs_buffers *buf = config;
+
+	kfree(buf);
+}
+
+static int etb_set_buffer(struct coresight_device *csdev,
+			  struct perf_output_handle *handle,
+			  void *sink_config)
+{
+	int ret = 0;
+	unsigned long head;
+	struct cs_buffers *buf = sink_config;
+
+	/* wrap head around to the amount of space we have */
+	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
+
+	/* find the page to write to */
+	buf->cur = head / PAGE_SIZE;
+
+	/* and offset within that page */
+	buf->offset = head % PAGE_SIZE;
+
+	local_set(&buf->data_size, 0);
+
+	return ret;
+}
+
+static unsigned long etb_reset_buffer(struct coresight_device *csdev,
+				      struct perf_output_handle *handle,
+				      void *sink_config, bool *lost)
+{
+	unsigned long size = 0;
+	struct cs_buffers *buf = sink_config;
+
+	if (buf) {
+		/*
+		 * In snapshot mode ->data_size holds the new address of the
+		 * ring buffer's head.  The size itself is the whole address
+		 * range since we want the latest information.
+		 */
+		if (buf->snapshot)
+			handle->head = local_xchg(&buf->data_size,
+						  buf->nr_pages << PAGE_SHIFT);
+
+		/*
+		 * Tell the tracer PMU how much we got in this run and if
+		 * something went wrong along the way.  Nobody else can use
+		 * this cs_buffers instance until we are done.  As such
+		 * resetting parameters here and squaring off with the ring
+		 * buffer API in the tracer PMU is fine.
+		 */
+		*lost = !!local_xchg(&buf->lost, 0);
+		size = local_xchg(&buf->data_size, 0);
+	}
+
+	return size;
+}
+
+static void etb_update_buffer(struct coresight_device *csdev,
+			      struct perf_output_handle *handle,
+			      void *sink_config)
+{
+	int i, cur;
+	u8 *buf_ptr;
+	u32 read_ptr, write_ptr, capacity;
+	u32 status, read_data, to_read;
+	unsigned long offset;
+	struct cs_buffers *buf = sink_config;
+	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	if (!buf)
+		return;
+
+	capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
+
+	CS_UNLOCK(drvdata->base);
+	etb_disable_hw(drvdata);
+
+	/* unit is in words, not bytes */
+	read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
+	write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);
+
+	/*
+	 * Entries should be aligned to the frame size.  If they are not,
+	 * go back to the last alignment point to give decoding tools a
+	 * chance to fix things.
+	 */
+	if (write_ptr % ETB_FRAME_SIZE_WORDS) {
+		dev_err(drvdata->dev,
+			"write_ptr: %lu not aligned to formatter frame size\n",
+			(unsigned long)write_ptr);
+
+		write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
+		local_inc(&buf->lost);
+	}
+
+	/*
+	 * Get a hold of the status register and see if a wrap around
+	 * has occurred.  If so adjust things accordingly.  Otherwise
+	 * start at the beginning and go until the write pointer has
+	 * been reached.
+	 */
+	status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
+	if (status & ETB_STATUS_RAM_FULL) {
+		local_inc(&buf->lost);
+		to_read = capacity;
+		read_ptr = write_ptr;
+	} else {
+		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth);
+		to_read *= ETB_FRAME_SIZE_WORDS;
+	}
+
+	/*
+	 * Make sure we don't overwrite data that hasn't been consumed yet.
+	 * It is entirely possible that the HW buffer has more data than the
+	 * ring buffer can currently handle.  If so adjust the start address
+	 * to take only the last traces.
+	 *
+	 * In snapshot mode we are looking to get the latest traces only and as
+	 * such, we don't care about not overwriting data that hasn't been
+	 * processed by user space.
+	 */
+	if (!buf->snapshot && to_read > handle->size) {
+		u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);
+
+		/* The new read pointer must be frame size aligned */
+		to_read -= handle->size & mask;
+		/*
+		 * Move the RAM read pointer up, keeping in mind that
+		 * everything is in frame size units.
+		 */
+		read_ptr = (write_ptr + drvdata->buffer_depth) -
+					to_read / ETB_FRAME_SIZE_WORDS;
+		/* Wrap around if need be */
+		read_ptr &= ~(drvdata->buffer_depth - 1);
+		/* let the decoder know we've skipped ahead */
+		local_inc(&buf->lost);
+	}
+
+	/* finally tell HW where we want to start reading from */
+	writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);
+
+	cur = buf->cur;
+	offset = buf->offset;
+	for (i = 0; i < to_read; i += 4) {
+		buf_ptr = buf->data_pages[cur] + offset;
+		read_data = readl_relaxed(drvdata->base +
+					  ETB_RAM_READ_DATA_REG);
+		*buf_ptr++ = read_data >> 0;
+		*buf_ptr++ = read_data >> 8;
+		*buf_ptr++ = read_data >> 16;
+		*buf_ptr++ = read_data >> 24;
+
+		offset += 4;
+		if (offset >= PAGE_SIZE) {
+			offset = 0;
+			cur++;
+			/* wrap around at the end of the buffer */
+			cur &= buf->nr_pages - 1;
+		}
+	}
+
+	/* reset ETB buffer for next run */
+	writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
+	writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
+
+	/*
+	 * In snapshot mode all we have to do is communicate to
+	 * perf_aux_output_end() the address of the current head.  In full
+	 * trace mode the same function expects a size to move rb->aux_head
+	 * forward.
+	 */
+	if (buf->snapshot)
+		local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
+	else
+		local_add(to_read, &buf->data_size);
+
+	etb_enable_hw(drvdata);
+	CS_LOCK(drvdata->base);
+}
+
 static const struct coresight_ops_sink etb_sink_ops = {
 	.enable		= etb_enable,
 	.disable	= etb_disable,
+	.alloc_buffer	= etb_alloc_buffer,
+	.free_buffer	= etb_free_buffer,
+	.set_buffer	= etb_set_buffer,
+	.reset_buffer	= etb_reset_buffer,
+	.update_buffer	= etb_update_buffer,
 };
 
 static const struct coresight_ops etb_cs_ops = {
@@ -266,7 +514,7 @@ static void etb_dump(struct etb_drvdata *drvdata)
 	unsigned long flags;
 
 	spin_lock_irqsave(&drvdata->spinlock, flags);
-	if (drvdata->enable) {
+	if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
 		etb_disable_hw(drvdata);
 		etb_dump_hw(drvdata);
 		etb_enable_hw(drvdata);
@@ -281,7 +529,7 @@ static int etb_open(struct inode *inode, struct file *file)
 	struct etb_drvdata *drvdata = container_of(file->private_data,
 						   struct etb_drvdata, miscdev);
 
-	if (atomic_cmpxchg(&drvdata->in_use, 0, 1))
+	if (local_cmpxchg(&drvdata->reading, 0, 1))
 		return -EBUSY;
 
 	dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
@@ -317,7 +565,7 @@ static int etb_release(struct inode *inode, struct file *file)
 {
 	struct etb_drvdata *drvdata = container_of(file->private_data,
 						   struct etb_drvdata, miscdev);
-	atomic_set(&drvdata->in_use, 0);
+	local_set(&drvdata->reading, 0);
 
 	dev_dbg(drvdata->dev, "%s: released\n", __func__);
 	return 0;
@@ -489,15 +737,6 @@ err_misc_register:
 	return ret;
 }
 
-static int etb_remove(struct amba_device *adev)
-{
-	struct etb_drvdata *drvdata = amba_get_drvdata(adev);
-
-	misc_deregister(&drvdata->miscdev);
-	coresight_unregister(drvdata->csdev);
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int etb_runtime_suspend(struct device *dev)
 {
@@ -537,14 +776,10 @@ static struct amba_driver etb_driver = {
 		.name	= "coresight-etb10",
 		.owner	= THIS_MODULE,
 		.pm	= &etb_dev_pm_ops,
+		.suppress_bind_attrs = true,
 
 	},
 	.probe		= etb_probe,
-	.remove		= etb_remove,
 	.id_table	= etb_ids,
 };
-
-module_amba_driver(etb_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Embedded Trace Buffer driver");
+builtin_amba_driver(etb_driver);
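
etb_update_buffer() above sizes its drain with CIRC_CNT() unless the RAM-full flag says the hardware wrapped, in which case it takes the whole capacity. A simplified, standalone model of just that computation; units are treated uniformly as words and buffer_depth is assumed to be a power of two, which the masking in CIRC_CNT() requires:

#include <stdio.h>

#define FRAME_WORDS	4U

/* CIRC_CNT() from <linux/circ_buf.h>: entries between head and tail. */
#define CIRC_CNT(head, tail, size)	(((head) - (tail)) & ((size) - 1))

/*
 * Given the ETB read/write pointers and whether the RAM-full flag was set,
 * compute how many words to drain, mirroring the hunk above.
 */
static unsigned int etb_words_to_read(unsigned int read_ptr,
				      unsigned int write_ptr,
				      unsigned int buffer_depth,
				      int ram_full)
{
	if (ram_full)
		return buffer_depth * FRAME_WORDS;	/* wrapped: drain it all */
	return CIRC_CNT(write_ptr, read_ptr, buffer_depth) * FRAME_WORDS;
}

int main(void)
{
	printf("%u\n", etb_words_to_read(0, 16, 1024, 0));	/* 64 words */
	printf("%u\n", etb_words_to_read(10, 2, 1024, 0));	/* wrapped pointers */
	printf("%u\n", etb_words_to_read(5, 5, 1024, 1));	/* RAM full */
	return 0;
}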

+ 393 - 0
drivers/hwtracing/coresight/coresight-etm-perf.c

@@ -0,0 +1,393 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/coresight.h>
+#include <linux/coresight-pmu.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "coresight-priv.h"
+
+static struct pmu etm_pmu;
+static bool etm_perf_up;
+
+/**
+ * struct etm_event_data - Coresight specifics associated to an event
+ * @work:		Handle to free allocated memory outside IRQ context.
+ * @mask:		Holds the CPU(s) this event was set for.
+ * @snk_config:		The sink configuration.
+ * @path:		An array of paths, one per CPU.
+ */
+struct etm_event_data {
+	struct work_struct work;
+	cpumask_t mask;
+	void *snk_config;
+	struct list_head **path;
+};
+
+static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
+static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
+
+/* ETMv3.5/PTM's ETMCR is 'config' */
+PMU_FORMAT_ATTR(cycacc,		"config:" __stringify(ETM_OPT_CYCACC));
+PMU_FORMAT_ATTR(timestamp,	"config:" __stringify(ETM_OPT_TS));
+
+static struct attribute *etm_config_formats_attr[] = {
+	&format_attr_cycacc.attr,
+	&format_attr_timestamp.attr,
+	NULL,
+};
+
+static struct attribute_group etm_pmu_format_group = {
+	.name   = "format",
+	.attrs  = etm_config_formats_attr,
+};
+
+static const struct attribute_group *etm_pmu_attr_groups[] = {
+	&etm_pmu_format_group,
+	NULL,
+};
+
+static void etm_event_read(struct perf_event *event) {}
+
+static int etm_event_init(struct perf_event *event)
+{
+	if (event->attr.type != etm_pmu.type)
+		return -ENOENT;
+
+	return 0;
+}
+
+static void free_event_data(struct work_struct *work)
+{
+	int cpu;
+	cpumask_t *mask;
+	struct etm_event_data *event_data;
+	struct coresight_device *sink;
+
+	event_data = container_of(work, struct etm_event_data, work);
+	mask = &event_data->mask;
+	/*
+	 * First deal with the sink configuration.  See comment in
+	 * etm_setup_aux() about why we take the first available path.
+	 */
+	if (event_data->snk_config) {
+		cpu = cpumask_first(mask);
+		sink = coresight_get_sink(event_data->path[cpu]);
+		if (sink_ops(sink)->free_buffer)
+			sink_ops(sink)->free_buffer(event_data->snk_config);
+	}
+
+	for_each_cpu(cpu, mask) {
+		if (event_data->path[cpu])
+			coresight_release_path(event_data->path[cpu]);
+	}
+
+	kfree(event_data->path);
+	kfree(event_data);
+}
+
+static void *alloc_event_data(int cpu)
+{
+	int size;
+	cpumask_t *mask;
+	struct etm_event_data *event_data;
+
+	/* First get memory for the session's data */
+	event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
+	if (!event_data)
+		return NULL;
+
+	/* Make sure nothing disappears under us */
+	get_online_cpus();
+	size = num_online_cpus();
+
+	mask = &event_data->mask;
+	if (cpu != -1)
+		cpumask_set_cpu(cpu, mask);
+	else
+		cpumask_copy(mask, cpu_online_mask);
+	put_online_cpus();
+
+	/*
+	 * Each CPU has a single path between source and destination.  As such
+	 * allocate an array using CPU numbers as indexes.  That way a path
+	 * for any CPU can easily be accessed at any given time.  We proceed
+	 * the same way for sessions involving a single CPU.  The cost of
+	 * unused memory when dealing with single CPU trace scenarios is small
+	 * compared to the cost of searching through an optimized array.
+	 */
+	event_data->path = kcalloc(size,
+				   sizeof(struct list_head *), GFP_KERNEL);
+	if (!event_data->path) {
+		kfree(event_data);
+		return NULL;
+	}
+
+	return event_data;
+}
+
+static void etm_free_aux(void *data)
+{
+	struct etm_event_data *event_data = data;
+
+	schedule_work(&event_data->work);
+}
+
+static void *etm_setup_aux(int event_cpu, void **pages,
+			   int nr_pages, bool overwrite)
+{
+	int cpu;
+	cpumask_t *mask;
+	struct coresight_device *sink;
+	struct etm_event_data *event_data = NULL;
+
+	event_data = alloc_event_data(event_cpu);
+	if (!event_data)
+		return NULL;
+
+	INIT_WORK(&event_data->work, free_event_data);
+
+	mask = &event_data->mask;
+
+	/* Setup the path for each CPU in a trace session */
+	for_each_cpu(cpu, mask) {
+		struct coresight_device *csdev;
+
+		csdev = per_cpu(csdev_src, cpu);
+		if (!csdev)
+			goto err;
+
+		/*
+		 * Building a path doesn't enable it; it simply builds a
+		 * list of devices from source to sink that can be
+		 * referenced later when the path is actually needed.
+		 */
+		event_data->path[cpu] = coresight_build_path(csdev);
+		if (!event_data->path[cpu])
+			goto err;
+	}
+
+	/*
+	 * In theory nothing prevents tracers in a trace session from being
+	 * associated with different sinks, nor having a sink per tracer.  But
+	 * until we have HW with this kind of topology and a way to convey
+	 * sink assignment from the perf cmd line we need to assume tracers
+	 * in a trace session are using the same sink.  Therefore pick the sink
+	 * found at the end of the first available path.
+	 */
+	cpu = cpumask_first(mask);
+	/* Grab the sink at the end of the path */
+	sink = coresight_get_sink(event_data->path[cpu]);
+	if (!sink)
+		goto err;
+
+	if (!sink_ops(sink)->alloc_buffer)
+		goto err;
+
+	/* Get the AUX specific data from the sink buffer */
+	event_data->snk_config =
+			sink_ops(sink)->alloc_buffer(sink, cpu, pages,
+						     nr_pages, overwrite);
+	if (!event_data->snk_config)
+		goto err;
+
+out:
+	return event_data;
+
+err:
+	etm_free_aux(event_data);
+	event_data = NULL;
+	goto out;
+}
+
+static void etm_event_start(struct perf_event *event, int flags)
+{
+	int cpu = smp_processor_id();
+	struct etm_event_data *event_data;
+	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
+	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
+
+	if (!csdev)
+		goto fail;
+
+	/*
+	 * Deal with the ring buffer API and get a handle on the
+	 * session's information.
+	 */
+	event_data = perf_aux_output_begin(handle, event);
+	if (!event_data)
+		goto fail;
+
+	/* We need a sink, no need to continue without one */
+	sink = coresight_get_sink(event_data->path[cpu]);
+	if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer))
+		goto fail_end_stop;
+
+	/* Configure the sink */
+	if (sink_ops(sink)->set_buffer(sink, handle,
+				       event_data->snk_config))
+		goto fail_end_stop;
+
+	/* Nothing will happen without a path */
+	if (coresight_enable_path(event_data->path[cpu], CS_MODE_PERF))
+		goto fail_end_stop;
+
+	/* Tell the perf core the event is alive */
+	event->hw.state = 0;
+
+	/* Finally enable the tracer */
+	if (source_ops(csdev)->enable(csdev, &event->attr, CS_MODE_PERF))
+		goto fail_end_stop;
+
+out:
+	return;
+
+fail_end_stop:
+	perf_aux_output_end(handle, 0, true);
+fail:
+	event->hw.state = PERF_HES_STOPPED;
+	goto out;
+}
+
+static void etm_event_stop(struct perf_event *event, int mode)
+{
+	bool lost;
+	int cpu = smp_processor_id();
+	unsigned long size;
+	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
+	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
+	struct etm_event_data *event_data = perf_get_aux(handle);
+
+	if (event->hw.state == PERF_HES_STOPPED)
+		return;
+
+	if (!csdev)
+		return;
+
+	sink = coresight_get_sink(event_data->path[cpu]);
+	if (!sink)
+		return;
+
+	/* stop tracer */
+	source_ops(csdev)->disable(csdev);
+
+	/* tell the core */
+	event->hw.state = PERF_HES_STOPPED;
+
+	if (mode & PERF_EF_UPDATE) {
+		if (WARN_ON_ONCE(handle->event != event))
+			return;
+
+		/* update trace information */
+		if (!sink_ops(sink)->update_buffer)
+			return;
+
+		sink_ops(sink)->update_buffer(sink, handle,
+					      event_data->snk_config);
+
+		if (!sink_ops(sink)->reset_buffer)
+			return;
+
+		size = sink_ops(sink)->reset_buffer(sink, handle,
+						    event_data->snk_config,
+						    &lost);
+
+		perf_aux_output_end(handle, size, lost);
+	}
+
+	/* Disabling the path makes its elements available to other sessions */
+	coresight_disable_path(event_data->path[cpu]);
+}
+
+static int etm_event_add(struct perf_event *event, int mode)
+{
+	int ret = 0;
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (mode & PERF_EF_START) {
+		etm_event_start(event, 0);
+		if (hwc->state & PERF_HES_STOPPED)
+			ret = -EINVAL;
+	} else {
+		hwc->state = PERF_HES_STOPPED;
+	}
+
+	return ret;
+}
+
+static void etm_event_del(struct perf_event *event, int mode)
+{
+	etm_event_stop(event, PERF_EF_UPDATE);
+}
+
+int etm_perf_symlink(struct coresight_device *csdev, bool link)
+{
+	char entry[sizeof("cpu9999999")];
+	int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
+	struct device *pmu_dev = etm_pmu.dev;
+	struct device *cs_dev = &csdev->dev;
+
+	sprintf(entry, "cpu%d", cpu);
+
+	if (!etm_perf_up)
+		return -EPROBE_DEFER;
+
+	if (link) {
+		ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
+		if (ret)
+			return ret;
+		per_cpu(csdev_src, cpu) = csdev;
+	} else {
+		sysfs_remove_link(&pmu_dev->kobj, entry);
+		per_cpu(csdev_src, cpu) = NULL;
+	}
+
+	return 0;
+}
+
+static int __init etm_perf_init(void)
+{
+	int ret;
+
+	etm_pmu.capabilities	= PERF_PMU_CAP_EXCLUSIVE;
+
+	etm_pmu.attr_groups	= etm_pmu_attr_groups;
+	etm_pmu.task_ctx_nr	= perf_sw_context;
+	etm_pmu.read		= etm_event_read;
+	etm_pmu.event_init	= etm_event_init;
+	etm_pmu.setup_aux	= etm_setup_aux;
+	etm_pmu.free_aux	= etm_free_aux;
+	etm_pmu.start		= etm_event_start;
+	etm_pmu.stop		= etm_event_stop;
+	etm_pmu.add		= etm_event_add;
+	etm_pmu.del		= etm_event_del;
+
+	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
+	if (ret == 0)
+		etm_perf_up = true;
+
+	return ret;
+}
+device_initcall(etm_perf_init);
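
The "cycacc" and "timestamp" format attributes published above let a perf_event_attr.config value carry ETM options down to the tracer, since etm_event_start() hands &event->attr to source_ops()->enable(). Purely as a hedged illustration, a user-space sketch of opening such an event by hand is shown below; it assumes the PMU registers under the "cs_etm" name (CORESIGHT_ETM_PMU_NAME) and that ETM_OPT_CYCACC/ETM_OPT_TS sit at bits 12 and 28 as exported in the sysfs format files, and it omits the AUX ring-buffer mmap that the perf tool would normally set up:

/* Hypothetical sketch, not part of this patch. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>

static int open_etm_event(pid_t pid, int cpu)
{
	struct perf_event_attr attr;
	unsigned int type;
	FILE *f;

	/* perf_pmu_register() publishes the dynamic PMU type in sysfs. */
	f = fopen("/sys/bus/event_source/devices/cs_etm/type", "r");
	if (!f)
		return -1;
	if (fscanf(f, "%u", &type) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	/* Bit positions assumed from coresight-pmu.h: cycacc=12, timestamp=28. */
	attr.config = (1ULL << 12) | (1ULL << 28);

	/* etm_event_init() only checks attr.type, so this is enough to bind. */
	return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
}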

+ 32 - 0
drivers/hwtracing/coresight/coresight-etm-perf.h

@@ -0,0 +1,32 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CORESIGHT_ETM_PERF_H
+#define _CORESIGHT_ETM_PERF_H
+
+struct coresight_device;
+
+#ifdef CONFIG_CORESIGHT
+int etm_perf_symlink(struct coresight_device *csdev, bool link);
+
+#else
+static inline int etm_perf_symlink(struct coresight_device *csdev, bool link)
+{ return -EINVAL; }
+
+#endif /* CONFIG_CORESIGHT */
+
+#endif
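
etm_perf_symlink() is the only hook a tracer driver needs from this header: with link=true it creates the "cpuN" symlink under the PMU device and records the source in the per-CPU csdev_src table, and with link=false it undoes both. Below is a minimal, hedged sketch of how a driver's probe/remove path might use it; the example_* wrappers are illustrative and not part of this patch:

#include "coresight-etm.h"
#include "coresight-etm-perf.h"

static int example_etm_add_perf(struct etm_drvdata *drvdata)
{
	int ret;

	ret = etm_perf_symlink(drvdata->csdev, true);
	/* -EPROBE_DEFER simply means etm_perf_init() has not run yet. */
	if (ret)
		return ret;

	return 0;
}

static void example_etm_del_perf(struct etm_drvdata *drvdata)
{
	/* Drops the cpuN symlink and clears this CPU's csdev_src slot. */
	etm_perf_symlink(drvdata->csdev, false);
}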

+ 96 - 46
drivers/hwtracing/coresight/coresight-etm.h

@@ -13,6 +13,7 @@
 #ifndef _CORESIGHT_CORESIGHT_ETM_H
 #define _CORESIGHT_CORESIGHT_ETM_H
 
+#include <asm/local.h>
 #include <linux/spinlock.h>
 #include "coresight-priv.h"
 
@@ -109,7 +110,10 @@
 #define ETM_MODE_STALL		BIT(2)
 #define ETM_MODE_TIMESTAMP	BIT(3)
 #define ETM_MODE_CTXID		BIT(4)
-#define ETM_MODE_ALL		0x1f
+#define ETM_MODE_ALL		(ETM_MODE_EXCLUDE | ETM_MODE_CYCACC | \
+				 ETM_MODE_STALL | ETM_MODE_TIMESTAMP | \
+				 ETM_MODE_CTXID | ETM_MODE_EXCL_KERN | \
+				 ETM_MODE_EXCL_USER)
 
 #define ETM_SQR_MASK		0x3
 #define ETM_TRACEID_MASK	0x3f
@@ -136,35 +140,16 @@
 #define ETM_DEFAULT_EVENT_VAL	(ETM_HARD_WIRE_RES_A	|	\
 				 ETM_ADD_COMP_0		|	\
 				 ETM_EVENT_NOT_A)
+
 /**
- * struct etm_drvdata - specifics associated to an ETM component
- * @base:	memory mapped base address for this component.
- * @dev:	the device entity associated to this component.
- * @atclk:	optional clock for the core parts of the ETM.
- * @csdev:	component vitals needed by the framework.
- * @spinlock:	only one at a time pls.
- * @cpu:	the cpu this component is affined to.
- * @port_size:	port size as reported by ETMCR bit 4-6 and 21.
- * @arch:	ETM/PTM version number.
- * @use_cpu14:	true if management registers need to be accessed via CP14.
- * @enable:	is this ETM/PTM currently tracing.
- * @sticky_enable: true if ETM base configuration has been done.
- * @boot_enable:true if we should start tracing at boot time.
- * @os_unlock:	true if access to management registers is allowed.
- * @nr_addr_cmp:Number of pairs of address comparators as found in ETMCCR.
- * @nr_cntr:	Number of counters as found in ETMCCR bit 13-15.
- * @nr_ext_inp:	Number of external input as found in ETMCCR bit 17-19.
- * @nr_ext_out:	Number of external output as found in ETMCCR bit 20-22.
- * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25.
- * @etmccr:	value of register ETMCCR.
- * @etmccer:	value of register ETMCCER.
- * @traceid:	value of the current ID for this component.
+ * struct etm_config - configuration information related to an ETM
  * @mode:	controls various modes supported by this ETM/PTM.
  * @ctrl:	used in conjunction with @mode.
  * @trigger_event: setting for register ETMTRIGGER.
  * @startstop_ctrl: setting for register ETMTSSCR.
  * @enable_event: setting for register ETMTEEVR.
  * @enable_ctrl1: setting for register ETMTECR1.
+ * @enable_ctrl2: setting for register ETMTECR2.
  * @fifofull_level: setting for register ETMFFLR.
  * @addr_idx:	index for the address comparator selection.
  * @addr_val:	value for address comparator register.
@@ -189,36 +174,16 @@
  * @ctxid_mask: mask applicable to all the context IDs.
  * @sync_freq:	Synchronisation frequency.
  * @timestamp_event: Defines an event that requests the insertion
-		     of a timestamp into the trace stream.
+ *		     of a timestamp into the trace stream.
  */
-struct etm_drvdata {
-	void __iomem			*base;
-	struct device			*dev;
-	struct clk			*atclk;
-	struct coresight_device		*csdev;
-	spinlock_t			spinlock;
-	int				cpu;
-	int				port_size;
-	u8				arch;
-	bool				use_cp14;
-	bool				enable;
-	bool				sticky_enable;
-	bool				boot_enable;
-	bool				os_unlock;
-	u8				nr_addr_cmp;
-	u8				nr_cntr;
-	u8				nr_ext_inp;
-	u8				nr_ext_out;
-	u8				nr_ctxid_cmp;
-	u32				etmccr;
-	u32				etmccer;
-	u32				traceid;
+struct etm_config {
 	u32				mode;
 	u32				ctrl;
 	u32				trigger_event;
 	u32				startstop_ctrl;
 	u32				enable_event;
 	u32				enable_ctrl1;
+	u32				enable_ctrl2;
 	u32				fifofull_level;
 	u8				addr_idx;
 	u32				addr_val[ETM_MAX_ADDR_CMP];
@@ -244,6 +209,56 @@ struct etm_drvdata {
 	u32				timestamp_event;
 };
 
+/**
+ * struct etm_drvdata - specifics associated with an ETM component
+ * @base:	memory mapped base address for this component.
+ * @dev:	the device entity associated to this component.
+ * @atclk:	optional clock for the core parts of the ETM.
+ * @csdev:	component vitals needed by the framework.
+ * @spinlock:	only one at a time pls.
+ * @cpu:	the cpu this component is affined to.
+ * @port_size:	port size as reported by ETMCR bit 4-6 and 21.
+ * @arch:	ETM/PTM version number.
+ * @use_cp14:	true if management registers need to be accessed via CP14.
+ * @mode:	this tracer's mode, i.e. sysFS, Perf or disabled.
+ * @sticky_enable: true if ETM base configuration has been done.
+ * @boot_enable:true if we should start tracing at boot time.
+ * @os_unlock:	true if access to management registers is allowed.
+ * @nr_addr_cmp:Number of pairs of address comparators as found in ETMCCR.
+ * @nr_cntr:	Number of counters as found in ETMCCR bit 13-15.
+ * @nr_ext_inp:	Number of external input as found in ETMCCR bit 17-19.
+ * @nr_ext_out:	Number of external output as found in ETMCCR bit 20-22.
+ * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25.
+ * @etmccr:	value of register ETMCCR.
+ * @etmccer:	value of register ETMCCER.
+ * @traceid:	value of the current ID for this component.
+ * @config:	structure holding configuration parameters.
+ */
+struct etm_drvdata {
+	void __iomem			*base;
+	struct device			*dev;
+	struct clk			*atclk;
+	struct coresight_device		*csdev;
+	spinlock_t			spinlock;
+	int				cpu;
+	int				port_size;
+	u8				arch;
+	bool				use_cp14;
+	local_t				mode;
+	bool				sticky_enable;
+	bool				boot_enable;
+	bool				os_unlock;
+	u8				nr_addr_cmp;
+	u8				nr_cntr;
+	u8				nr_ext_inp;
+	u8				nr_ext_out;
+	u8				nr_ctxid_cmp;
+	u32				etmccr;
+	u32				etmccer;
+	u32				traceid;
+	struct etm_config		config;
+};
+
 enum etm_addr_type {
 	ETM_ADDR_TYPE_NONE,
 	ETM_ADDR_TYPE_SINGLE,
@@ -251,4 +266,39 @@ enum etm_addr_type {
 	ETM_ADDR_TYPE_START,
 	ETM_ADDR_TYPE_STOP,
 };
+
+static inline void etm_writel(struct etm_drvdata *drvdata,
+			      u32 val, u32 off)
+{
+	if (drvdata->use_cp14) {
+		if (etm_writel_cp14(off, val)) {
+			dev_err(drvdata->dev,
+				"invalid CP14 access to ETM reg: %#x", off);
+		}
+	} else {
+		writel_relaxed(val, drvdata->base + off);
+	}
+}
+
+static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
+{
+	u32 val;
+
+	if (drvdata->use_cp14) {
+		if (etm_readl_cp14(off, &val)) {
+			dev_err(drvdata->dev,
+				"invalid CP14 access to ETM reg: %#x", off);
+		}
+	} else {
+		val = readl_relaxed(drvdata->base + off);
+	}
+
+	return val;
+}
+
+extern const struct attribute_group *coresight_etm_groups[];
+int etm_get_trace_id(struct etm_drvdata *drvdata);
+void etm_set_default(struct etm_config *config);
+void etm_config_trace_mode(struct etm_config *config);
+struct etm_config *get_etm_config(struct etm_drvdata *drvdata);
 #endif
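
Replacing the old "bool enable" with a local_t "mode" in struct etm_drvdata is what lets the sysFS and perf interfaces claim the tracer without racing each other: a compare-and-exchange on the per-device mode either takes ownership or reports that the other interface already holds it. A minimal sketch of that idea follows, assuming a CS_MODE_DISABLED constant exists alongside the CS_MODE_PERF value used elsewhere in this series; the example_etm_claim() helper is illustrative only:

#include <asm/local.h>
#include <linux/errno.h>
#include <linux/types.h>
#include "coresight-etm.h"

static int example_etm_claim(struct etm_drvdata *drvdata, u32 new_mode)
{
	/* Atomically move DISABLED -> new_mode; anything else means busy. */
	long old = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, new_mode);

	if (old != CS_MODE_DISABLED)
		return -EBUSY;

	return 0;
}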

+ 1272 - 0
drivers/hwtracing/coresight/coresight-etm3x-sysfs.c

@@ -0,0 +1,1272 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
+#include "coresight-etm.h"
+
+static ssize_t nr_addr_cmp_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->nr_addr_cmp;
+	return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_addr_cmp);
+
+static ssize_t nr_cntr_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->nr_cntr;
+	return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_cntr);
+
+static ssize_t nr_ctxid_cmp_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->nr_ctxid_cmp;
+	return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_ctxid_cmp);
+
+static ssize_t etmsr_show(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	unsigned long flags, val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	pm_runtime_get_sync(drvdata->dev);
+	spin_lock_irqsave(&drvdata->spinlock, flags);
+	CS_UNLOCK(drvdata->base);
+
+	val = etm_readl(drvdata, ETMSR);
+
+	CS_LOCK(drvdata->base);
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+	pm_runtime_put(drvdata->dev);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(etmsr);
+
+static ssize_t reset_store(struct device *dev,
+			   struct device_attribute *attr,
+			   const char *buf, size_t size)
+{
+	int i, ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	if (val) {
+		spin_lock(&drvdata->spinlock);
+		memset(config, 0, sizeof(struct etm_config));
+		config->mode = ETM_MODE_EXCLUDE;
+		config->trigger_event = ETM_DEFAULT_EVENT_VAL;
+		for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+			config->addr_type[i] = ETM_ADDR_TYPE_NONE;
+		}
+
+		etm_set_default(config);
+		spin_unlock(&drvdata->spinlock);
+	}
+
+	return size;
+}
+static DEVICE_ATTR_WO(reset);
+
+static ssize_t mode_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->mode;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t mode_store(struct device *dev,
+			  struct device_attribute *attr,
+			  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	config->mode = val & ETM_MODE_ALL;
+
+	if (config->mode & ETM_MODE_EXCLUDE)
+		config->enable_ctrl1 |= ETMTECR1_INC_EXC;
+	else
+		config->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
+
+	if (config->mode & ETM_MODE_CYCACC)
+		config->ctrl |= ETMCR_CYC_ACC;
+	else
+		config->ctrl &= ~ETMCR_CYC_ACC;
+
+	if (config->mode & ETM_MODE_STALL) {
+		if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
+			dev_warn(drvdata->dev, "stall mode not supported\n");
+			ret = -EINVAL;
+			goto err_unlock;
+		}
+		config->ctrl |= ETMCR_STALL_MODE;
+	} else
+		config->ctrl &= ~ETMCR_STALL_MODE;
+
+	if (config->mode & ETM_MODE_TIMESTAMP) {
+		if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
+			dev_warn(drvdata->dev, "timestamp not supported\n");
+			ret = -EINVAL;
+			goto err_unlock;
+		}
+		config->ctrl |= ETMCR_TIMESTAMP_EN;
+	} else
+		config->ctrl &= ~ETMCR_TIMESTAMP_EN;
+
+	if (config->mode & ETM_MODE_CTXID)
+		config->ctrl |= ETMCR_CTXID_SIZE;
+	else
+		config->ctrl &= ~ETMCR_CTXID_SIZE;
+
+	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
+		etm_config_trace_mode(config);
+
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+
+err_unlock:
+	spin_unlock(&drvdata->spinlock);
+	return ret;
+}
+static DEVICE_ATTR_RW(mode);
+
+static ssize_t trigger_event_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->trigger_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t trigger_event_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->trigger_event = val & ETM_EVENT_MASK;
+
+	return size;
+}
+static DEVICE_ATTR_RW(trigger_event);
+
+static ssize_t enable_event_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->enable_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t enable_event_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->enable_event = val & ETM_EVENT_MASK;
+
+	return size;
+}
+static DEVICE_ATTR_RW(enable_event);
+
+static ssize_t fifofull_level_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->fifofull_level;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t fifofull_level_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->fifofull_level = val;
+
+	return size;
+}
+static DEVICE_ATTR_RW(fifofull_level);
+
+static ssize_t addr_idx_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->addr_idx;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_idx_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	if (val >= drvdata->nr_addr_cmp)
+		return -EINVAL;
+
+	/*
+	 * Use spinlock to ensure index doesn't change while it gets
+	 * dereferenced multiple times within a spinlock block elsewhere.
+	 */
+	spin_lock(&drvdata->spinlock);
+	config->addr_idx = val;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(addr_idx);
+
+static ssize_t addr_single_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	u8 idx;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	idx = config->addr_idx;
+	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+		spin_unlock(&drvdata->spinlock);
+		return -EINVAL;
+	}
+
+	val = config->addr_val[idx];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_single_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t size)
+{
+	u8 idx;
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	idx = config->addr_idx;
+	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+		spin_unlock(&drvdata->spinlock);
+		return -EINVAL;
+	}
+
+	config->addr_val[idx] = val;
+	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(addr_single);
+
+static ssize_t addr_range_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	u8 idx;
+	unsigned long val1, val2;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	idx = config->addr_idx;
+	if (idx % 2 != 0) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+
+	val1 = config->addr_val[idx];
+	val2 = config->addr_val[idx + 1];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t addr_range_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	u8 idx;
+	unsigned long val1, val2;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+		return -EINVAL;
+	/* Lower address comparator cannot have a higher address value */
+	if (val1 > val2)
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+	idx = config->addr_idx;
+	if (idx % 2 != 0) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+
+	config->addr_val[idx] = val1;
+	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
+	config->addr_val[idx + 1] = val2;
+	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
+	config->enable_ctrl1 |= (1 << (idx/2));
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(addr_range);
+
+static ssize_t addr_start_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	u8 idx;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	idx = config->addr_idx;
+	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+
+	val = config->addr_val[idx];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_start_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	u8 idx;
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	idx = config->addr_idx;
+	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+
+	config->addr_val[idx] = val;
+	config->addr_type[idx] = ETM_ADDR_TYPE_START;
+	config->startstop_ctrl |= (1 << idx);
+	config->enable_ctrl1 |= BIT(25);
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(addr_start);
+
+static ssize_t addr_stop_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	u8 idx;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	idx = config->addr_idx;
+	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+
+	val = config->addr_val[idx];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_stop_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t size)
+{
+	u8 idx;
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	idx = config->addr_idx;
+	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+		spin_unlock(&drvdata->spinlock);
+		return -EPERM;
+	}
+
+	config->addr_val[idx] = val;
+	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
+	config->startstop_ctrl |= (1 << (idx + 16));
+	config->enable_ctrl1 |= ETMTECR1_START_STOP;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(addr_stop);
+
+static ssize_t addr_acctype_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	val = config->addr_acctype[config->addr_idx];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_acctype_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	config->addr_acctype[config->addr_idx] = val;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(addr_acctype);
+
+static ssize_t cntr_idx_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->cntr_idx;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_idx_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	if (val >= drvdata->nr_cntr)
+		return -EINVAL;
+	/*
+	 * Use spinlock to ensure index doesn't change while it gets
+	 * dereferenced multiple times within a spinlock block elsewhere.
+	 */
+	spin_lock(&drvdata->spinlock);
+	config->cntr_idx = val;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(cntr_idx);
+
+static ssize_t cntr_rld_val_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	val = config->cntr_rld_val[config->cntr_idx];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_rld_val_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	config->cntr_rld_val[config->cntr_idx] = val;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(cntr_rld_val);
+
+static ssize_t cntr_event_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	val = config->cntr_event[config->cntr_idx];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_event_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	config->cntr_event[config->cntr_idx] = val & ETM_EVENT_MASK;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(cntr_event);
+
+static ssize_t cntr_rld_event_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	val = config->cntr_rld_event[config->cntr_idx];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_rld_event_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	config->cntr_rld_event[config->cntr_idx] = val & ETM_EVENT_MASK;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(cntr_rld_event);
+
+static ssize_t cntr_val_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	int i, ret = 0;
+	u32 val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	if (!local_read(&drvdata->mode)) {
+		spin_lock(&drvdata->spinlock);
+		for (i = 0; i < drvdata->nr_cntr; i++)
+			ret += sprintf(buf + ret, "counter %d: %x\n",
+				       i, config->cntr_val[i]);
+		spin_unlock(&drvdata->spinlock);
+		return ret;
+	}
+
+	for (i = 0; i < drvdata->nr_cntr; i++) {
+		val = etm_readl(drvdata, ETMCNTVRn(i));
+		ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
+	}
+
+	return ret;
+}
+
+static ssize_t cntr_val_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	config->cntr_val[config->cntr_idx] = val;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(cntr_val);
+
+static ssize_t seq_12_event_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->seq_12_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_12_event_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->seq_12_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(seq_12_event);
+
+static ssize_t seq_21_event_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->seq_21_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_21_event_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->seq_21_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(seq_21_event);
+
+static ssize_t seq_23_event_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->seq_23_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_23_event_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->seq_23_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(seq_23_event);
+
+static ssize_t seq_31_event_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->seq_31_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_31_event_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->seq_31_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(seq_31_event);
+
+static ssize_t seq_32_event_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->seq_32_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_32_event_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->seq_32_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(seq_32_event);
+
+static ssize_t seq_13_event_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->seq_13_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_13_event_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->seq_13_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(seq_13_event);
+
+static ssize_t seq_curr_state_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	unsigned long val, flags;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	if (!local_read(&drvdata->mode)) {
+		val = config->seq_curr_state;
+		goto out;
+	}
+
+	pm_runtime_get_sync(drvdata->dev);
+	spin_lock_irqsave(&drvdata->spinlock, flags);
+
+	CS_UNLOCK(drvdata->base);
+	val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
+	CS_LOCK(drvdata->base);
+
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+	pm_runtime_put(drvdata->dev);
+out:
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_curr_state_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	if (val > ETM_SEQ_STATE_MAX_VAL)
+		return -EINVAL;
+
+	config->seq_curr_state = val;
+
+	return size;
+}
+static DEVICE_ATTR_RW(seq_curr_state);
+
+static ssize_t ctxid_idx_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->ctxid_idx;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_idx_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	if (val >= drvdata->nr_ctxid_cmp)
+		return -EINVAL;
+
+	/*
+	 * Use spinlock to ensure index doesn't change while it gets
+	 * dereferenced multiple times within a spinlock block elsewhere.
+	 */
+	spin_lock(&drvdata->spinlock);
+	config->ctxid_idx = val;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(ctxid_idx);
+
+static ssize_t ctxid_pid_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	spin_lock(&drvdata->spinlock);
+	val = config->ctxid_vpid[config->ctxid_idx];
+	spin_unlock(&drvdata->spinlock);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_pid_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t size)
+{
+	int ret;
+	unsigned long vpid, pid;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &vpid);
+	if (ret)
+		return ret;
+
+	pid = coresight_vpid_to_pid(vpid);
+
+	spin_lock(&drvdata->spinlock);
+	config->ctxid_pid[config->ctxid_idx] = pid;
+	config->ctxid_vpid[config->ctxid_idx] = vpid;
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(ctxid_pid);
+
+static ssize_t ctxid_mask_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->ctxid_mask;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_mask_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->ctxid_mask = val;
+	return size;
+}
+static DEVICE_ATTR_RW(ctxid_mask);
+
+static ssize_t sync_freq_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->sync_freq;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t sync_freq_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->sync_freq = val & ETM_SYNC_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(sync_freq);
+
+static ssize_t timestamp_event_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	val = config->timestamp_event;
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t timestamp_event_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	config->timestamp_event = val & ETM_EVENT_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(timestamp_event);
+
+static ssize_t cpu_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	int val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = drvdata->cpu;
+	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+static DEVICE_ATTR_RO(cpu);
+
+static ssize_t traceid_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	val = etm_get_trace_id(drvdata);
+
+	return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t traceid_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t size)
+{
+	int ret;
+	unsigned long val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	drvdata->traceid = val & ETM_TRACEID_MASK;
+	return size;
+}
+static DEVICE_ATTR_RW(traceid);
+
+static struct attribute *coresight_etm_attrs[] = {
+	&dev_attr_nr_addr_cmp.attr,
+	&dev_attr_nr_cntr.attr,
+	&dev_attr_nr_ctxid_cmp.attr,
+	&dev_attr_etmsr.attr,
+	&dev_attr_reset.attr,
+	&dev_attr_mode.attr,
+	&dev_attr_trigger_event.attr,
+	&dev_attr_enable_event.attr,
+	&dev_attr_fifofull_level.attr,
+	&dev_attr_addr_idx.attr,
+	&dev_attr_addr_single.attr,
+	&dev_attr_addr_range.attr,
+	&dev_attr_addr_start.attr,
+	&dev_attr_addr_stop.attr,
+	&dev_attr_addr_acctype.attr,
+	&dev_attr_cntr_idx.attr,
+	&dev_attr_cntr_rld_val.attr,
+	&dev_attr_cntr_event.attr,
+	&dev_attr_cntr_rld_event.attr,
+	&dev_attr_cntr_val.attr,
+	&dev_attr_seq_12_event.attr,
+	&dev_attr_seq_21_event.attr,
+	&dev_attr_seq_23_event.attr,
+	&dev_attr_seq_31_event.attr,
+	&dev_attr_seq_32_event.attr,
+	&dev_attr_seq_13_event.attr,
+	&dev_attr_seq_curr_state.attr,
+	&dev_attr_ctxid_idx.attr,
+	&dev_attr_ctxid_pid.attr,
+	&dev_attr_ctxid_mask.attr,
+	&dev_attr_sync_freq.attr,
+	&dev_attr_timestamp_event.attr,
+	&dev_attr_traceid.attr,
+	&dev_attr_cpu.attr,
+	NULL,
+};
+
+#define coresight_simple_func(name, offset)                             \
+static ssize_t name##_show(struct device *_dev,                         \
+			   struct device_attribute *attr, char *buf)    \
+{                                                                       \
+	struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent);    \
+	return scnprintf(buf, PAGE_SIZE, "0x%x\n",                      \
+			 readl_relaxed(drvdata->base + offset));        \
+}                                                                       \
+DEVICE_ATTR_RO(name)
+
+coresight_simple_func(etmccr, ETMCCR);
+coresight_simple_func(etmccer, ETMCCER);
+coresight_simple_func(etmscr, ETMSCR);
+coresight_simple_func(etmidr, ETMIDR);
+coresight_simple_func(etmcr, ETMCR);
+coresight_simple_func(etmtraceidr, ETMTRACEIDR);
+coresight_simple_func(etmteevr, ETMTEEVR);
+coresight_simple_func(etmtssvr, ETMTSSCR);
+coresight_simple_func(etmtecr1, ETMTECR1);
+coresight_simple_func(etmtecr2, ETMTECR2);
+
+static struct attribute *coresight_etm_mgmt_attrs[] = {
+	&dev_attr_etmccr.attr,
+	&dev_attr_etmccer.attr,
+	&dev_attr_etmscr.attr,
+	&dev_attr_etmidr.attr,
+	&dev_attr_etmcr.attr,
+	&dev_attr_etmtraceidr.attr,
+	&dev_attr_etmteevr.attr,
+	&dev_attr_etmtssvr.attr,
+	&dev_attr_etmtecr1.attr,
+	&dev_attr_etmtecr2.attr,
+	NULL,
+};
+
+static const struct attribute_group coresight_etm_group = {
+	.attrs = coresight_etm_attrs,
+};
+
+static const struct attribute_group coresight_etm_mgmt_group = {
+	.attrs = coresight_etm_mgmt_attrs,
+	.name = "mgmt",
+};
+
+const struct attribute_group *coresight_etm_groups[] = {
+	&coresight_etm_group,
+	&coresight_etm_mgmt_group,
+	NULL,
+};
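
All of the address comparator files above follow the same pattern: addr_idx selects a comparator (or comparator pair), and a subsequent write to addr_single/addr_range/addr_start/addr_stop both stores the value and tags the comparator's type, so a comparator already configured as one type cannot silently be reused as another. Purely as a hedged illustration, a user-space sketch of driving that interface is shown below, assuming the usual /sys/bus/coresight/devices layout and a caller-supplied device name:

/* Hypothetical sketch, not part of this patch. */
#include <stdio.h>

static int example_set_range(const char *etm_name,
			     unsigned long start, unsigned long end)
{
	char path[256];
	FILE *f;

	/* Select address comparator pair 0 (addr_idx_store() parses hex). */
	snprintf(path, sizeof(path),
		 "/sys/bus/coresight/devices/%s/addr_idx", etm_name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "0\n");
	fclose(f);

	/* Program both boundaries in one write, as addr_range_store() expects. */
	snprintf(path, sizeof(path),
		 "/sys/bus/coresight/devices/%s/addr_range", etm_name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%lx %lx\n", start, end);
	fclose(f);

	return 0;
}

Something like example_set_range("ptm0", start, end) (the "ptm0" name is a placeholder) would then constrain tracing to that window once the range bit that addr_range_store() sets in enable_ctrl1 is programmed into the hardware.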

+ 349 - 1388
drivers/hwtracing/coresight/coresight-etm3x.c

@@ -1,4 +1,6 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Program Flow Trace driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,7 +13,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
@@ -27,14 +29,21 @@
 #include <linux/cpu.h>
 #include <linux/of.h>
 #include <linux/coresight.h>
+#include <linux/coresight-pmu.h>
 #include <linux/amba/bus.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/clk.h>
+#include <linux/perf_event.h>
 #include <asm/sections.h>
 
 #include "coresight-etm.h"
+#include "coresight-etm-perf.h"
 
+/*
+ * Not really modular but using module_param is the easiest way to
+ * remain consistent with existing use cases for now.
+ */
 static int boot_enable;
 module_param_named(boot_enable, boot_enable, int, S_IRUGO);
 
@@ -42,45 +51,16 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO);
 static int etm_count;
 static struct etm_drvdata *etmdrvdata[NR_CPUS];
 
-static inline void etm_writel(struct etm_drvdata *drvdata,
-			      u32 val, u32 off)
-{
-	if (drvdata->use_cp14) {
-		if (etm_writel_cp14(off, val)) {
-			dev_err(drvdata->dev,
-				"invalid CP14 access to ETM reg: %#x", off);
-		}
-	} else {
-		writel_relaxed(val, drvdata->base + off);
-	}
-}
-
-static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
-{
-	u32 val;
-
-	if (drvdata->use_cp14) {
-		if (etm_readl_cp14(off, &val)) {
-			dev_err(drvdata->dev,
-				"invalid CP14 access to ETM reg: %#x", off);
-		}
-	} else {
-		val = readl_relaxed(drvdata->base + off);
-	}
-
-	return val;
-}
-
 /*
  * Memory mapped writes to clear os lock are not supported on some processors
  * and OS lock must be unlocked before any memory mapped access on such
  * processors, otherwise memory mapped reads/writes will be invalid.
  */
-static void etm_os_unlock(void *info)
+static void etm_os_unlock(struct etm_drvdata *drvdata)
 {
-	struct etm_drvdata *drvdata = (struct etm_drvdata *)info;
 	/* Writing any value to ETMOSLAR unlocks the trace registers */
 	etm_writel(drvdata, 0x0, ETMOSLAR);
+	drvdata->os_unlock = true;
 	isb();
 }
 
@@ -215,1431 +195,450 @@ static void etm_clr_prog(struct etm_drvdata *drvdata)
 	}
 }
 
-static void etm_set_default(struct etm_drvdata *drvdata)
-{
-	int i;
-
-	drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
-	drvdata->enable_event = ETM_HARD_WIRE_RES_A;
-
-	drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
-	drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
-	drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
-	drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
-	drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
-	drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
-	drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;
-
-	for (i = 0; i < drvdata->nr_cntr; i++) {
-		drvdata->cntr_rld_val[i] = 0x0;
-		drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
-		drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
-		drvdata->cntr_val[i] = 0x0;
-	}
-
-	drvdata->seq_curr_state = 0x0;
-	drvdata->ctxid_idx = 0x0;
-	for (i = 0; i < drvdata->nr_ctxid_cmp; i++) {
-		drvdata->ctxid_pid[i] = 0x0;
-		drvdata->ctxid_vpid[i] = 0x0;
-	}
-
-	drvdata->ctxid_mask = 0x0;
-}
-
-static void etm_enable_hw(void *info)
+void etm_set_default(struct etm_config *config)
 {
 	int i;
-	u32 etmcr;
-	struct etm_drvdata *drvdata = info;
 
-	CS_UNLOCK(drvdata->base);
-
-	/* Turn engine on */
-	etm_clr_pwrdwn(drvdata);
-	/* Apply power to trace registers */
-	etm_set_pwrup(drvdata);
-	/* Make sure all registers are accessible */
-	etm_os_unlock(drvdata);
-
-	etm_set_prog(drvdata);
-
-	etmcr = etm_readl(drvdata, ETMCR);
-	etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
-	etmcr |= drvdata->port_size;
-	etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
-	etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
-	etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
-	etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
-	etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
-	etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
-	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
-		etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
-		etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
-	}
-	for (i = 0; i < drvdata->nr_cntr; i++) {
-		etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
-		etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
-		etm_writel(drvdata, drvdata->cntr_rld_event[i],
-			   ETMCNTRLDEVRn(i));
-		etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
-	}
-	etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
-	etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
-	etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
-	etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
-	etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
-	etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
-	etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
-	for (i = 0; i < drvdata->nr_ext_out; i++)
-		etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
-	for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
-		etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i));
-	etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
-	etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
-	/* No external input selected */
-	etm_writel(drvdata, 0x0, ETMEXTINSELR);
-	etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
-	/* No auxiliary control selected */
-	etm_writel(drvdata, 0x0, ETMAUXCR);
-	etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
-	/* No VMID comparator value selected */
-	etm_writel(drvdata, 0x0, ETMVMIDCVR);
-
-	/* Ensures trace output is enabled from this ETM */
-	etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);
-
-	etm_clr_prog(drvdata);
-	CS_LOCK(drvdata->base);
-
-	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
-}
-
-static int etm_trace_id(struct coresight_device *csdev)
-{
-	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-	unsigned long flags;
-	int trace_id = -1;
-
-	if (!drvdata->enable)
-		return drvdata->traceid;
-	pm_runtime_get_sync(csdev->dev.parent);
-
-	spin_lock_irqsave(&drvdata->spinlock, flags);
-
-	CS_UNLOCK(drvdata->base);
-	trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
-	CS_LOCK(drvdata->base);
-
-	spin_unlock_irqrestore(&drvdata->spinlock, flags);
-	pm_runtime_put(csdev->dev.parent);
-
-	return trace_id;
-}
-
-static int etm_enable(struct coresight_device *csdev)
-{
-	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-	int ret;
-
-	pm_runtime_get_sync(csdev->dev.parent);
-	spin_lock(&drvdata->spinlock);
+	if (WARN_ON_ONCE(!config))
+		return;
 
 	/*
-	 * Configure the ETM only if the CPU is online.  If it isn't online
-	 * hw configuration will take place when 'CPU_STARTING' is received
-	 * in @etm_cpu_callback.
+	 * Taken verbatim from the TRM:
+	 *
+	 * To trace all memory:
+	 *  set bit [24] in register 0x009, the ETMTECR1, to 1
+	 *  set all other bits in register 0x009, the ETMTECR1, to 0
+	 *  set all bits in register 0x007, the ETMTECR2, to 0
+	 *  set register 0x008, the ETMTEEVR, to 0x6F (TRUE).
 	 */
-	if (cpu_online(drvdata->cpu)) {
-		ret = smp_call_function_single(drvdata->cpu,
-					       etm_enable_hw, drvdata, 1);
-		if (ret)
-			goto err;
-	}
-
-	drvdata->enable = true;
-	drvdata->sticky_enable = true;
+	config->enable_ctrl1 = BIT(24);
+	config->enable_ctrl2 = 0x0;
+	config->enable_event = ETM_HARD_WIRE_RES_A;
 
-	spin_unlock(&drvdata->spinlock);
-
-	dev_info(drvdata->dev, "ETM tracing enabled\n");
-	return 0;
-err:
-	spin_unlock(&drvdata->spinlock);
-	pm_runtime_put(csdev->dev.parent);
-	return ret;
-}
+	config->trigger_event = ETM_DEFAULT_EVENT_VAL;
+	config->enable_event = ETM_HARD_WIRE_RES_A;
 
-static void etm_disable_hw(void *info)
-{
-	int i;
-	struct etm_drvdata *drvdata = info;
-
-	CS_UNLOCK(drvdata->base);
-	etm_set_prog(drvdata);
-
-	/* Program trace enable to low by using always false event */
-	etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);
-
-	/* Read back sequencer and counters for post trace analysis */
-	drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
-
-	for (i = 0; i < drvdata->nr_cntr; i++)
-		drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
-
-	etm_set_pwrdwn(drvdata);
-	CS_LOCK(drvdata->base);
-
-	dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
-}
-
-static void etm_disable(struct coresight_device *csdev)
-{
-	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
-	/*
-	 * Taking hotplug lock here protects from clocks getting disabled
-	 * with tracing being left on (crash scenario) if user disable occurs
-	 * after cpu online mask indicates the cpu is offline but before the
-	 * DYING hotplug callback is serviced by the ETM driver.
-	 */
-	get_online_cpus();
-	spin_lock(&drvdata->spinlock);
-
-	/*
-	 * Executing etm_disable_hw on the cpu whose ETM is being disabled
-	 * ensures that register writes occur when cpu is powered.
-	 */
-	smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
-	drvdata->enable = false;
-
-	spin_unlock(&drvdata->spinlock);
-	put_online_cpus();
-	pm_runtime_put(csdev->dev.parent);
-
-	dev_info(drvdata->dev, "ETM tracing disabled\n");
-}
-
-static const struct coresight_ops_source etm_source_ops = {
-	.trace_id	= etm_trace_id,
-	.enable		= etm_enable,
-	.disable	= etm_disable,
-};
-
-static const struct coresight_ops etm_cs_ops = {
-	.source_ops	= &etm_source_ops,
-};
-
-static ssize_t nr_addr_cmp_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->nr_addr_cmp;
-	return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_addr_cmp);
-
-static ssize_t nr_cntr_show(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->nr_cntr;
-	return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_cntr);
-
-static ssize_t nr_ctxid_cmp_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->nr_ctxid_cmp;
-	return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_ctxid_cmp);
-
-static ssize_t etmsr_show(struct device *dev,
-			  struct device_attribute *attr, char *buf)
-{
-	unsigned long flags, val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	pm_runtime_get_sync(drvdata->dev);
-	spin_lock_irqsave(&drvdata->spinlock, flags);
-	CS_UNLOCK(drvdata->base);
-
-	val = etm_readl(drvdata, ETMSR);
-
-	CS_LOCK(drvdata->base);
-	spin_unlock_irqrestore(&drvdata->spinlock, flags);
-	pm_runtime_put(drvdata->dev);
-
-	return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(etmsr);
-
-static ssize_t reset_store(struct device *dev,
-			   struct device_attribute *attr,
-			   const char *buf, size_t size)
-{
-	int i, ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	if (val) {
-		spin_lock(&drvdata->spinlock);
-		drvdata->mode = ETM_MODE_EXCLUDE;
-		drvdata->ctrl = 0x0;
-		drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
-		drvdata->startstop_ctrl = 0x0;
-		drvdata->addr_idx = 0x0;
-		for (i = 0; i < drvdata->nr_addr_cmp; i++) {
-			drvdata->addr_val[i] = 0x0;
-			drvdata->addr_acctype[i] = 0x0;
-			drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
-		}
-		drvdata->cntr_idx = 0x0;
-
-		etm_set_default(drvdata);
-		spin_unlock(&drvdata->spinlock);
-	}
-
-	return size;
-}
-static DEVICE_ATTR_WO(reset);
-
-static ssize_t mode_show(struct device *dev,
-			 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->mode;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t mode_store(struct device *dev,
-			  struct device_attribute *attr,
-			  const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	spin_lock(&drvdata->spinlock);
-	drvdata->mode = val & ETM_MODE_ALL;
-
-	if (drvdata->mode & ETM_MODE_EXCLUDE)
-		drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC;
-	else
-		drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
-
-	if (drvdata->mode & ETM_MODE_CYCACC)
-		drvdata->ctrl |= ETMCR_CYC_ACC;
-	else
-		drvdata->ctrl &= ~ETMCR_CYC_ACC;
-
-	if (drvdata->mode & ETM_MODE_STALL) {
-		if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
-			dev_warn(drvdata->dev, "stall mode not supported\n");
-			ret = -EINVAL;
-			goto err_unlock;
-		}
-		drvdata->ctrl |= ETMCR_STALL_MODE;
-	 } else
-		drvdata->ctrl &= ~ETMCR_STALL_MODE;
-
-	if (drvdata->mode & ETM_MODE_TIMESTAMP) {
-		if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
-			dev_warn(drvdata->dev, "timestamp not supported\n");
-			ret = -EINVAL;
-			goto err_unlock;
-		}
-		drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
-	} else
-		drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN;
-
-	if (drvdata->mode & ETM_MODE_CTXID)
-		drvdata->ctrl |= ETMCR_CTXID_SIZE;
-	else
-		drvdata->ctrl &= ~ETMCR_CTXID_SIZE;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-
-err_unlock:
-	spin_unlock(&drvdata->spinlock);
-	return ret;
-}
-static DEVICE_ATTR_RW(mode);
-
-static ssize_t trigger_event_show(struct device *dev,
-				  struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->trigger_event;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t trigger_event_store(struct device *dev,
-				   struct device_attribute *attr,
-				   const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	drvdata->trigger_event = val & ETM_EVENT_MASK;
-
-	return size;
-}
-static DEVICE_ATTR_RW(trigger_event);
-
-static ssize_t enable_event_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->enable_event;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t enable_event_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	drvdata->enable_event = val & ETM_EVENT_MASK;
-
-	return size;
-}
-static DEVICE_ATTR_RW(enable_event);
-
-static ssize_t fifofull_level_show(struct device *dev,
-				   struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->fifofull_level;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t fifofull_level_store(struct device *dev,
-				    struct device_attribute *attr,
-				    const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	drvdata->fifofull_level = val;
-
-	return size;
-}
-static DEVICE_ATTR_RW(fifofull_level);
-
-static ssize_t addr_idx_show(struct device *dev,
-			     struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->addr_idx;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_idx_store(struct device *dev,
-			      struct device_attribute *attr,
-			      const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	if (val >= drvdata->nr_addr_cmp)
-		return -EINVAL;
-
-	/*
-	 * Use spinlock to ensure index doesn't change while it gets
-	 * dereferenced multiple times within a spinlock block elsewhere.
-	 */
-	spin_lock(&drvdata->spinlock);
-	drvdata->addr_idx = val;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(addr_idx);
-
-static ssize_t addr_single_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	u8 idx;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	spin_lock(&drvdata->spinlock);
-	idx = drvdata->addr_idx;
-	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
-		spin_unlock(&drvdata->spinlock);
-		return -EINVAL;
-	}
-
-	val = drvdata->addr_val[idx];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_single_store(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf, size_t size)
-{
-	u8 idx;
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	spin_lock(&drvdata->spinlock);
-	idx = drvdata->addr_idx;
-	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
-		spin_unlock(&drvdata->spinlock);
-		return -EINVAL;
-	}
-
-	drvdata->addr_val[idx] = val;
-	drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(addr_single);
-
-static ssize_t addr_range_show(struct device *dev,
-			       struct device_attribute *attr, char *buf)
-{
-	u8 idx;
-	unsigned long val1, val2;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	spin_lock(&drvdata->spinlock);
-	idx = drvdata->addr_idx;
-	if (idx % 2 != 0) {
-		spin_unlock(&drvdata->spinlock);
-		return -EPERM;
-	}
-	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
-	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
-	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
-	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
-		spin_unlock(&drvdata->spinlock);
-		return -EPERM;
-	}
-
-	val1 = drvdata->addr_val[idx];
-	val2 = drvdata->addr_val[idx + 1];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx %#lx\n", val1, val2);
-}
-
-static ssize_t addr_range_store(struct device *dev,
-			      struct device_attribute *attr,
-			      const char *buf, size_t size)
-{
-	u8 idx;
-	unsigned long val1, val2;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
-		return -EINVAL;
-	/* Lower address comparator cannot have a higher address value */
-	if (val1 > val2)
-		return -EINVAL;
-
-	spin_lock(&drvdata->spinlock);
-	idx = drvdata->addr_idx;
-	if (idx % 2 != 0) {
-		spin_unlock(&drvdata->spinlock);
-		return -EPERM;
-	}
-	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
-	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
-	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
-	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
-		spin_unlock(&drvdata->spinlock);
-		return -EPERM;
-	}
-
-	drvdata->addr_val[idx] = val1;
-	drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
-	drvdata->addr_val[idx + 1] = val2;
-	drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
-	drvdata->enable_ctrl1 |= (1 << (idx/2));
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(addr_range);
-
-static ssize_t addr_start_show(struct device *dev,
-			       struct device_attribute *attr, char *buf)
-{
-	u8 idx;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	spin_lock(&drvdata->spinlock);
-	idx = drvdata->addr_idx;
-	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
-		spin_unlock(&drvdata->spinlock);
-		return -EPERM;
-	}
-
-	val = drvdata->addr_val[idx];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_start_store(struct device *dev,
-				struct device_attribute *attr,
-				const char *buf, size_t size)
-{
-	u8 idx;
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	spin_lock(&drvdata->spinlock);
-	idx = drvdata->addr_idx;
-	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
-		spin_unlock(&drvdata->spinlock);
-		return -EPERM;
-	}
-
-	drvdata->addr_val[idx] = val;
-	drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
-	drvdata->startstop_ctrl |= (1 << idx);
-	drvdata->enable_ctrl1 |= BIT(25);
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(addr_start);
-
-static ssize_t addr_stop_show(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-	u8 idx;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	spin_lock(&drvdata->spinlock);
-	idx = drvdata->addr_idx;
-	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
-		spin_unlock(&drvdata->spinlock);
-		return -EPERM;
-	}
-
-	val = drvdata->addr_val[idx];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_stop_store(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t size)
-{
-	u8 idx;
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	spin_lock(&drvdata->spinlock);
-	idx = drvdata->addr_idx;
-	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
-		spin_unlock(&drvdata->spinlock);
-		return -EPERM;
-	}
-
-	drvdata->addr_val[idx] = val;
-	drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
-	drvdata->startstop_ctrl |= (1 << (idx + 16));
-	drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(addr_stop);
-
-static ssize_t addr_acctype_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	spin_lock(&drvdata->spinlock);
-	val = drvdata->addr_acctype[drvdata->addr_idx];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_acctype_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	spin_lock(&drvdata->spinlock);
-	drvdata->addr_acctype[drvdata->addr_idx] = val;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(addr_acctype);
-
-static ssize_t cntr_idx_show(struct device *dev,
-			     struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->cntr_idx;
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_idx_store(struct device *dev,
-			      struct device_attribute *attr,
-			      const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	if (val >= drvdata->nr_cntr)
-		return -EINVAL;
-	/*
-	 * Use spinlock to ensure index doesn't change while it gets
-	 * dereferenced multiple times within a spinlock block elsewhere.
-	 */
-	spin_lock(&drvdata->spinlock);
-	drvdata->cntr_idx = val;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(cntr_idx);
-
-static ssize_t cntr_rld_val_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	spin_lock(&drvdata->spinlock);
-	val = drvdata->cntr_rld_val[drvdata->cntr_idx];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_rld_val_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	spin_lock(&drvdata->spinlock);
-	drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(cntr_rld_val);
-
-static ssize_t cntr_event_show(struct device *dev,
-			       struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	spin_lock(&drvdata->spinlock);
-	val = drvdata->cntr_event[drvdata->cntr_idx];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_event_store(struct device *dev,
-				struct device_attribute *attr,
-				const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	spin_lock(&drvdata->spinlock);
-	drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(cntr_event);
-
-static ssize_t cntr_rld_event_show(struct device *dev,
-				   struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	spin_lock(&drvdata->spinlock);
-	val = drvdata->cntr_rld_event[drvdata->cntr_idx];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_rld_event_store(struct device *dev,
-				    struct device_attribute *attr,
-				    const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	spin_lock(&drvdata->spinlock);
-	drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
-	spin_unlock(&drvdata->spinlock);
-
-	return size;
-}
-static DEVICE_ATTR_RW(cntr_rld_event);
-
-static ssize_t cntr_val_show(struct device *dev,
-			     struct device_attribute *attr, char *buf)
-{
-	int i, ret = 0;
-	u32 val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	if (!drvdata->enable) {
-		spin_lock(&drvdata->spinlock);
-		for (i = 0; i < drvdata->nr_cntr; i++)
-			ret += sprintf(buf, "counter %d: %x\n",
-				       i, drvdata->cntr_val[i]);
-		spin_unlock(&drvdata->spinlock);
-		return ret;
-	}
-
-	for (i = 0; i < drvdata->nr_cntr; i++) {
-		val = etm_readl(drvdata, ETMCNTVRn(i));
-		ret += sprintf(buf, "counter %d: %x\n", i, val);
-	}
-
-	return ret;
-}
-
-static ssize_t cntr_val_store(struct device *dev,
-			      struct device_attribute *attr,
-			      const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	config->seq_12_event = ETM_DEFAULT_EVENT_VAL;
+	config->seq_21_event = ETM_DEFAULT_EVENT_VAL;
+	config->seq_23_event = ETM_DEFAULT_EVENT_VAL;
+	config->seq_31_event = ETM_DEFAULT_EVENT_VAL;
+	config->seq_32_event = ETM_DEFAULT_EVENT_VAL;
+	config->seq_13_event = ETM_DEFAULT_EVENT_VAL;
+	config->timestamp_event = ETM_DEFAULT_EVENT_VAL;
 
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
+	for (i = 0; i < ETM_MAX_CNTR; i++) {
+		config->cntr_rld_val[i] = 0x0;
+		config->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
+		config->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
+		config->cntr_val[i] = 0x0;
+	}
 
-	spin_lock(&drvdata->spinlock);
-	drvdata->cntr_val[drvdata->cntr_idx] = val;
-	spin_unlock(&drvdata->spinlock);
+	config->seq_curr_state = 0x0;
+	config->ctxid_idx = 0x0;
+	for (i = 0; i < ETM_MAX_CTXID_CMP; i++) {
+		config->ctxid_pid[i] = 0x0;
+		config->ctxid_vpid[i] = 0x0;
+	}
 
-	return size;
+	config->ctxid_mask = 0x0;
 }
-static DEVICE_ATTR_RW(cntr_val);
 
-static ssize_t seq_12_event_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
+void etm_config_trace_mode(struct etm_config *config)
 {
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	u32 flags, mode;
 
-	val = drvdata->seq_12_event;
-	return sprintf(buf, "%#lx\n", val);
-}
+	mode = config->mode;
 
-static ssize_t seq_12_event_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
 
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
+	/* excluding kernel AND user space doesn't make sense */
+	if (mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
+		return;
 
-	drvdata->seq_12_event = val & ETM_EVENT_MASK;
-	return size;
-}
-static DEVICE_ATTR_RW(seq_12_event);
+	/* nothing to do if neither flag is set */
+	if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
+		return;
 
-static ssize_t seq_21_event_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	flags = (1 << 0 |	/* instruction execute */
+		 3 << 3 |	/* ARM instruction */
+		 0 << 5 |	/* No data value comparison */
+		 0 << 7 |	/* No exact match */
+		 0 << 8);	/* Ignore context ID */
 
-	val = drvdata->seq_21_event;
-	return sprintf(buf, "%#lx\n", val);
-}
+	/* No need to worry about single address comparators. */
+	config->enable_ctrl2 = 0x0;
 
-static ssize_t seq_21_event_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	/* Bit 0 is address range comparator 1 */
+	config->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
 
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
+	/*
+	 * On ETMv3.5:
+	 * ETMACTRn[13,11] == Non-secure state comparison control
+	 * ETMACTRn[12,10] == Secure state comparison control
+	 *
+	 * b00 == Match in all modes in this state
+	 * b01 == Do not match in any mode in this state
+	 * b10 == Match in all modes except user mode in this state
+	 * b11 == Match only in user mode in this state
+	 */
 
-	drvdata->seq_21_event = val & ETM_EVENT_MASK;
-	return size;
-}
-static DEVICE_ATTR_RW(seq_21_event);
+	/* Tracing in secure mode is not supported at this time */
+	flags |= (0 << 12 | 1 << 10);
 
-static ssize_t seq_23_event_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	if (mode & ETM_MODE_EXCL_USER) {
+		/* exclude user, match all modes except user mode */
+		flags |= (1 << 13 | 0 << 11);
+	} else {
+		/* exclude kernel, match only in user mode */
+		flags |= (1 << 13 | 1 << 11);
+	}
 
-	val = drvdata->seq_23_event;
-	return sprintf(buf, "%#lx\n", val);
+	/*
+	 * The ETMEEVR register is already set to "hard wire A".  As such
+	 * all there is to do is setup an address comparator that spans
+	 * the entire address range and configure the state and mode bits.
+	 */
+	config->addr_val[0] = (u32) 0x0;
+	config->addr_val[1] = (u32) ~0x0;
+	config->addr_acctype[0] = flags;
+	config->addr_acctype[1] = flags;
+	config->addr_type[0] = ETM_ADDR_TYPE_RANGE;
+	config->addr_type[1] = ETM_ADDR_TYPE_RANGE;
 }
 
-static ssize_t seq_23_event_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
+#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN)
+
+static int etm_parse_event_config(struct etm_drvdata *drvdata,
+				  struct perf_event_attr *attr)
 {
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_config *config = &drvdata->config;
 
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
+	if (!attr)
+		return -EINVAL;
 
-	drvdata->seq_23_event = val & ETM_EVENT_MASK;
-	return size;
-}
-static DEVICE_ATTR_RW(seq_23_event);
+	/* Clear configuration from previous run */
+	memset(config, 0, sizeof(struct etm_config));
 
-static ssize_t seq_31_event_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	if (attr->exclude_kernel)
+		config->mode = ETM_MODE_EXCL_KERN;
 
-	val = drvdata->seq_31_event;
-	return sprintf(buf, "%#lx\n", val);
-}
+	if (attr->exclude_user)
+		config->mode = ETM_MODE_EXCL_USER;
 
-static ssize_t seq_31_event_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	/* Always start from the default config */
+	etm_set_default(config);
 
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
+	/*
+	 * By default the tracers are configured to trace the whole address
+	 * range.  Narrow the field only if requested by user space.
+	 */
+	if (config->mode)
+		etm_config_trace_mode(config);
 
-	drvdata->seq_31_event = val & ETM_EVENT_MASK;
-	return size;
-}
-static DEVICE_ATTR_RW(seq_31_event);
+	/*
+	 * At this time only cycle accurate and timestamp options are
+	 * available.
+	 */
+	if (attr->config & ~ETM3X_SUPPORTED_OPTIONS)
+		return -EINVAL;
 
-static ssize_t seq_32_event_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	config->ctrl = attr->config;
 
-	val = drvdata->seq_32_event;
-	return sprintf(buf, "%#lx\n", val);
+	return 0;
 }
 
-static ssize_t seq_32_event_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
+static void etm_enable_hw(void *info)
 {
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	int i;
+	u32 etmcr;
+	struct etm_drvdata *drvdata = info;
+	struct etm_config *config = &drvdata->config;
 
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
+	CS_UNLOCK(drvdata->base);
 
-	drvdata->seq_32_event = val & ETM_EVENT_MASK;
-	return size;
-}
-static DEVICE_ATTR_RW(seq_32_event);
+	/* Turn engine on */
+	etm_clr_pwrdwn(drvdata);
+	/* Apply power to trace registers */
+	etm_set_pwrup(drvdata);
+	/* Make sure all registers are accessible */
+	etm_os_unlock(drvdata);
 
-static ssize_t seq_13_event_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	etm_set_prog(drvdata);
+
+	etmcr = etm_readl(drvdata, ETMCR);
+	/* Clear setting from a previous run if need be */
+	etmcr &= ~ETM3X_SUPPORTED_OPTIONS;
+	etmcr |= drvdata->port_size;
+	etmcr |= ETMCR_ETM_EN;
+	etm_writel(drvdata, config->ctrl | etmcr, ETMCR);
+	etm_writel(drvdata, config->trigger_event, ETMTRIGGER);
+	etm_writel(drvdata, config->startstop_ctrl, ETMTSSCR);
+	etm_writel(drvdata, config->enable_event, ETMTEEVR);
+	etm_writel(drvdata, config->enable_ctrl1, ETMTECR1);
+	etm_writel(drvdata, config->fifofull_level, ETMFFLR);
+	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+		etm_writel(drvdata, config->addr_val[i], ETMACVRn(i));
+		etm_writel(drvdata, config->addr_acctype[i], ETMACTRn(i));
+	}
+	for (i = 0; i < drvdata->nr_cntr; i++) {
+		etm_writel(drvdata, config->cntr_rld_val[i], ETMCNTRLDVRn(i));
+		etm_writel(drvdata, config->cntr_event[i], ETMCNTENRn(i));
+		etm_writel(drvdata, config->cntr_rld_event[i],
+			   ETMCNTRLDEVRn(i));
+		etm_writel(drvdata, config->cntr_val[i], ETMCNTVRn(i));
+	}
+	etm_writel(drvdata, config->seq_12_event, ETMSQ12EVR);
+	etm_writel(drvdata, config->seq_21_event, ETMSQ21EVR);
+	etm_writel(drvdata, config->seq_23_event, ETMSQ23EVR);
+	etm_writel(drvdata, config->seq_31_event, ETMSQ31EVR);
+	etm_writel(drvdata, config->seq_32_event, ETMSQ32EVR);
+	etm_writel(drvdata, config->seq_13_event, ETMSQ13EVR);
+	etm_writel(drvdata, config->seq_curr_state, ETMSQR);
+	for (i = 0; i < drvdata->nr_ext_out; i++)
+		etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
+	for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
+		etm_writel(drvdata, config->ctxid_pid[i], ETMCIDCVRn(i));
+	etm_writel(drvdata, config->ctxid_mask, ETMCIDCMR);
+	etm_writel(drvdata, config->sync_freq, ETMSYNCFR);
+	/* No external input selected */
+	etm_writel(drvdata, 0x0, ETMEXTINSELR);
+	etm_writel(drvdata, config->timestamp_event, ETMTSEVR);
+	/* No auxiliary control selected */
+	etm_writel(drvdata, 0x0, ETMAUXCR);
+	etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
+	/* No VMID comparator value selected */
+	etm_writel(drvdata, 0x0, ETMVMIDCVR);
+
+	etm_clr_prog(drvdata);
+	CS_LOCK(drvdata->base);
 
-	val = drvdata->seq_13_event;
-	return sprintf(buf, "%#lx\n", val);
+	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
 }
 
-static ssize_t seq_13_event_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t size)
+static int etm_cpu_id(struct coresight_device *csdev)
 {
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
+	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	drvdata->seq_13_event = val & ETM_EVENT_MASK;
-	return size;
+	return drvdata->cpu;
 }
-static DEVICE_ATTR_RW(seq_13_event);
 
-static ssize_t seq_curr_state_show(struct device *dev,
-				   struct device_attribute *attr, char *buf)
+int etm_get_trace_id(struct etm_drvdata *drvdata)
 {
-	unsigned long val, flags;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long flags;
+	int trace_id = -1;
 
-	if (!drvdata->enable) {
-		val = drvdata->seq_curr_state;
+	if (!drvdata)
 		goto out;
-	}
+
+	if (!local_read(&drvdata->mode))
+		return drvdata->traceid;
 
 	pm_runtime_get_sync(drvdata->dev);
+
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 
 	CS_UNLOCK(drvdata->base);
-	val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
+	trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
 	CS_LOCK(drvdata->base);
 
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 	pm_runtime_put(drvdata->dev);
-out:
-	return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_curr_state_store(struct device *dev,
-				    struct device_attribute *attr,
-				    const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	if (val > ETM_SEQ_STATE_MAX_VAL)
-		return -EINVAL;
 
-	drvdata->seq_curr_state = val;
+out:
+	return trace_id;
 
-	return size;
 }
-static DEVICE_ATTR_RW(seq_curr_state);
 
-static ssize_t ctxid_idx_show(struct device *dev,
-			      struct device_attribute *attr, char *buf)
+static int etm_trace_id(struct coresight_device *csdev)
 {
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	val = drvdata->ctxid_idx;
-	return sprintf(buf, "%#lx\n", val);
+	return etm_get_trace_id(drvdata);
 }
 
-static ssize_t ctxid_idx_store(struct device *dev,
-				struct device_attribute *attr,
-				const char *buf, size_t size)
+static int etm_enable_perf(struct coresight_device *csdev,
+			   struct perf_event_attr *attr)
 {
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
+	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	if (val >= drvdata->nr_ctxid_cmp)
+	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
 		return -EINVAL;
 
-	/*
-	 * Use spinlock to ensure index doesn't change while it gets
-	 * dereferenced multiple times within a spinlock block elsewhere.
-	 */
-	spin_lock(&drvdata->spinlock);
-	drvdata->ctxid_idx = val;
-	spin_unlock(&drvdata->spinlock);
+	/* Configure the tracer based on the session's specifics */
+	etm_parse_event_config(drvdata, attr);
+	/* And enable it */
+	etm_enable_hw(drvdata);
 
-	return size;
+	return 0;
 }
-static DEVICE_ATTR_RW(ctxid_idx);
 
-static ssize_t ctxid_pid_show(struct device *dev,
-			      struct device_attribute *attr, char *buf)
+static int etm_enable_sysfs(struct coresight_device *csdev)
 {
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+	int ret;
 
 	spin_lock(&drvdata->spinlock);
-	val = drvdata->ctxid_vpid[drvdata->ctxid_idx];
-	spin_unlock(&drvdata->spinlock);
-
-	return sprintf(buf, "%#lx\n", val);
-}
 
-static ssize_t ctxid_pid_store(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t size)
-{
-	int ret;
-	unsigned long vpid, pid;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	/*
+	 * Configure the ETM only if the CPU is online.  If it isn't online
+	 * hw configuration will take place when 'CPU_STARTING' is received
+	 * in @etm_cpu_callback.
+	 */
+	if (cpu_online(drvdata->cpu)) {
+		ret = smp_call_function_single(drvdata->cpu,
+					       etm_enable_hw, drvdata, 1);
+		if (ret)
+			goto err;
+	}
 
-	ret = kstrtoul(buf, 16, &vpid);
-	if (ret)
-		return ret;
+	drvdata->sticky_enable = true;
+	spin_unlock(&drvdata->spinlock);
 
-	pid = coresight_vpid_to_pid(vpid);
+	dev_info(drvdata->dev, "ETM tracing enabled\n");
+	return 0;
 
-	spin_lock(&drvdata->spinlock);
-	drvdata->ctxid_pid[drvdata->ctxid_idx] = pid;
-	drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid;
+err:
 	spin_unlock(&drvdata->spinlock);
-
-	return size;
+	return ret;
 }
-static DEVICE_ATTR_RW(ctxid_pid);
 
-static ssize_t ctxid_mask_show(struct device *dev,
-			       struct device_attribute *attr, char *buf)
+static int etm_enable(struct coresight_device *csdev,
+		      struct perf_event_attr *attr, u32 mode)
 {
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	int ret;
+	u32 val;
+	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	val = drvdata->ctxid_mask;
-	return sprintf(buf, "%#lx\n", val);
-}
+	val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
 
-static ssize_t ctxid_mask_store(struct device *dev,
-				struct device_attribute *attr,
-				const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	/* Someone is already using the tracer */
+	if (val)
+		return -EBUSY;
+
+	switch (mode) {
+	case CS_MODE_SYSFS:
+		ret = etm_enable_sysfs(csdev);
+		break;
+	case CS_MODE_PERF:
+		ret = etm_enable_perf(csdev, attr);
+		break;
+	default:
+		ret = -EINVAL;
+	}
 
-	ret = kstrtoul(buf, 16, &val);
+	/* The tracer didn't start */
 	if (ret)
-		return ret;
+		local_set(&drvdata->mode, CS_MODE_DISABLED);
 
-	drvdata->ctxid_mask = val;
-	return size;
+	return ret;
 }
-static DEVICE_ATTR_RW(ctxid_mask);
 
-static ssize_t sync_freq_show(struct device *dev,
-			      struct device_attribute *attr, char *buf)
+static void etm_disable_hw(void *info)
 {
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	val = drvdata->sync_freq;
-	return sprintf(buf, "%#lx\n", val);
-}
+	int i;
+	struct etm_drvdata *drvdata = info;
+	struct etm_config *config = &drvdata->config;
 
-static ssize_t sync_freq_store(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t size)
-{
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	CS_UNLOCK(drvdata->base);
+	etm_set_prog(drvdata);
 
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
+	/* Read back sequencer and counters for post trace analysis */
+	config->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
 
-	drvdata->sync_freq = val & ETM_SYNC_MASK;
-	return size;
-}
-static DEVICE_ATTR_RW(sync_freq);
+	for (i = 0; i < drvdata->nr_cntr; i++)
+		config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
 
-static ssize_t timestamp_event_show(struct device *dev,
-				    struct device_attribute *attr, char *buf)
-{
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	etm_set_pwrdwn(drvdata);
+	CS_LOCK(drvdata->base);
 
-	val = drvdata->timestamp_event;
-	return sprintf(buf, "%#lx\n", val);
+	dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
 }
 
-static ssize_t timestamp_event_store(struct device *dev,
-				     struct device_attribute *attr,
-				     const char *buf, size_t size)
+static void etm_disable_perf(struct coresight_device *csdev)
 {
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
+	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
+		return;
 
-	drvdata->timestamp_event = val & ETM_EVENT_MASK;
-	return size;
-}
-static DEVICE_ATTR_RW(timestamp_event);
+	CS_UNLOCK(drvdata->base);
 
-static ssize_t cpu_show(struct device *dev,
-			struct device_attribute *attr, char *buf)
-{
-	int val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	/* Setting the prog bit disables tracing immediately */
+	etm_set_prog(drvdata);
 
-	val = drvdata->cpu;
-	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+	/*
+	 * There is no way to know when the tracer will be used again so
+	 * power down the tracer.
+	 */
+	etm_set_pwrdwn(drvdata);
 
+	CS_LOCK(drvdata->base);
 }
-static DEVICE_ATTR_RO(cpu);
 
-static ssize_t traceid_show(struct device *dev,
-			    struct device_attribute *attr, char *buf)
+static void etm_disable_sysfs(struct coresight_device *csdev)
 {
-	unsigned long val, flags;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	if (!drvdata->enable) {
-		val = drvdata->traceid;
-		goto out;
-	}
+	/*
+	 * Taking hotplug lock here protects from clocks getting disabled
+	 * with tracing being left on (crash scenario) if user disable occurs
+	 * after cpu online mask indicates the cpu is offline but before the
+	 * DYING hotplug callback is serviced by the ETM driver.
+	 */
+	get_online_cpus();
+	spin_lock(&drvdata->spinlock);
 
-	pm_runtime_get_sync(drvdata->dev);
-	spin_lock_irqsave(&drvdata->spinlock, flags);
-	CS_UNLOCK(drvdata->base);
+	/*
+	 * Executing etm_disable_hw on the cpu whose ETM is being disabled
+	 * ensures that register writes occur when cpu is powered.
+	 */
+	smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
 
-	val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
+	spin_unlock(&drvdata->spinlock);
+	put_online_cpus();
 
-	CS_LOCK(drvdata->base);
-	spin_unlock_irqrestore(&drvdata->spinlock, flags);
-	pm_runtime_put(drvdata->dev);
-out:
-	return sprintf(buf, "%#lx\n", val);
+	dev_info(drvdata->dev, "ETM tracing disabled\n");
 }
 
-static ssize_t traceid_store(struct device *dev,
-			     struct device_attribute *attr,
-			     const char *buf, size_t size)
+static void etm_disable(struct coresight_device *csdev)
 {
-	int ret;
-	unsigned long val;
-	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	ret = kstrtoul(buf, 16, &val);
-	if (ret)
-		return ret;
-
-	drvdata->traceid = val & ETM_TRACEID_MASK;
-	return size;
-}
-static DEVICE_ATTR_RW(traceid);
-
-static struct attribute *coresight_etm_attrs[] = {
-	&dev_attr_nr_addr_cmp.attr,
-	&dev_attr_nr_cntr.attr,
-	&dev_attr_nr_ctxid_cmp.attr,
-	&dev_attr_etmsr.attr,
-	&dev_attr_reset.attr,
-	&dev_attr_mode.attr,
-	&dev_attr_trigger_event.attr,
-	&dev_attr_enable_event.attr,
-	&dev_attr_fifofull_level.attr,
-	&dev_attr_addr_idx.attr,
-	&dev_attr_addr_single.attr,
-	&dev_attr_addr_range.attr,
-	&dev_attr_addr_start.attr,
-	&dev_attr_addr_stop.attr,
-	&dev_attr_addr_acctype.attr,
-	&dev_attr_cntr_idx.attr,
-	&dev_attr_cntr_rld_val.attr,
-	&dev_attr_cntr_event.attr,
-	&dev_attr_cntr_rld_event.attr,
-	&dev_attr_cntr_val.attr,
-	&dev_attr_seq_12_event.attr,
-	&dev_attr_seq_21_event.attr,
-	&dev_attr_seq_23_event.attr,
-	&dev_attr_seq_31_event.attr,
-	&dev_attr_seq_32_event.attr,
-	&dev_attr_seq_13_event.attr,
-	&dev_attr_seq_curr_state.attr,
-	&dev_attr_ctxid_idx.attr,
-	&dev_attr_ctxid_pid.attr,
-	&dev_attr_ctxid_mask.attr,
-	&dev_attr_sync_freq.attr,
-	&dev_attr_timestamp_event.attr,
-	&dev_attr_traceid.attr,
-	&dev_attr_cpu.attr,
-	NULL,
-};
+	u32 mode;
+	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-#define coresight_simple_func(name, offset)                             \
-static ssize_t name##_show(struct device *_dev,                         \
-			   struct device_attribute *attr, char *buf)    \
-{                                                                       \
-	struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent);    \
-	return scnprintf(buf, PAGE_SIZE, "0x%x\n",                      \
-			 readl_relaxed(drvdata->base + offset));        \
-}                                                                       \
-DEVICE_ATTR_RO(name)
-
-coresight_simple_func(etmccr, ETMCCR);
-coresight_simple_func(etmccer, ETMCCER);
-coresight_simple_func(etmscr, ETMSCR);
-coresight_simple_func(etmidr, ETMIDR);
-coresight_simple_func(etmcr, ETMCR);
-coresight_simple_func(etmtraceidr, ETMTRACEIDR);
-coresight_simple_func(etmteevr, ETMTEEVR);
-coresight_simple_func(etmtssvr, ETMTSSCR);
-coresight_simple_func(etmtecr1, ETMTECR1);
-coresight_simple_func(etmtecr2, ETMTECR2);
-
-static struct attribute *coresight_etm_mgmt_attrs[] = {
-	&dev_attr_etmccr.attr,
-	&dev_attr_etmccer.attr,
-	&dev_attr_etmscr.attr,
-	&dev_attr_etmidr.attr,
-	&dev_attr_etmcr.attr,
-	&dev_attr_etmtraceidr.attr,
-	&dev_attr_etmteevr.attr,
-	&dev_attr_etmtssvr.attr,
-	&dev_attr_etmtecr1.attr,
-	&dev_attr_etmtecr2.attr,
-	NULL,
-};
+	/*
+	 * For as long as the tracer isn't disabled another entity can't
+	 * change its status.  As such we can read the status here without
+	 * fearing it will change under us.
+	 */
+	mode = local_read(&drvdata->mode);
 
-static const struct attribute_group coresight_etm_group = {
-	.attrs = coresight_etm_attrs,
-};
+	switch (mode) {
+	case CS_MODE_DISABLED:
+		break;
+	case CS_MODE_SYSFS:
+		etm_disable_sysfs(csdev);
+		break;
+	case CS_MODE_PERF:
+		etm_disable_perf(csdev);
+		break;
+	default:
+		WARN_ON_ONCE(mode);
+		return;
+	}
 
+	if (mode)
+		local_set(&drvdata->mode, CS_MODE_DISABLED);
+}
 
-static const struct attribute_group coresight_etm_mgmt_group = {
-	.attrs = coresight_etm_mgmt_attrs,
-	.name = "mgmt",
+static const struct coresight_ops_source etm_source_ops = {
+	.cpu_id		= etm_cpu_id,
+	.trace_id	= etm_trace_id,
+	.enable		= etm_enable,
+	.disable	= etm_disable,
 };
 
-static const struct attribute_group *coresight_etm_groups[] = {
-	&coresight_etm_group,
-	&coresight_etm_mgmt_group,
-	NULL,
+static const struct coresight_ops etm_cs_ops = {
+	.source_ops	= &etm_source_ops,
 };
 
 static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
@@ -1658,7 +657,7 @@ static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
 			etmdrvdata[cpu]->os_unlock = true;
 		}
 
-		if (etmdrvdata[cpu]->enable)
+		if (local_read(&etmdrvdata[cpu]->mode))
 			etm_enable_hw(etmdrvdata[cpu]);
 		spin_unlock(&etmdrvdata[cpu]->spinlock);
 		break;
@@ -1671,7 +670,7 @@ static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
 
 	case CPU_DYING:
 		spin_lock(&etmdrvdata[cpu]->spinlock);
-		if (etmdrvdata[cpu]->enable)
+		if (local_read(&etmdrvdata[cpu]->mode))
 			etm_disable_hw(etmdrvdata[cpu]);
 		spin_unlock(&etmdrvdata[cpu]->spinlock);
 		break;
@@ -1707,6 +706,9 @@ static void etm_init_arch_data(void *info)
 	u32 etmccr;
 	struct etm_drvdata *drvdata = info;
 
+	/* Make sure all registers are accessible */
+	etm_os_unlock(drvdata);
+
 	CS_UNLOCK(drvdata->base);
 
 	/* First dummy read */
@@ -1743,40 +745,9 @@ static void etm_init_arch_data(void *info)
 	CS_LOCK(drvdata->base);
 }
 
-static void etm_init_default_data(struct etm_drvdata *drvdata)
+static void etm_init_trace_id(struct etm_drvdata *drvdata)
 {
-	/*
-	 * A trace ID of value 0 is invalid, so let's start at some
-	 * random value that fits in 7 bits and will be just as good.
-	 */
-	static int etm3x_traceid = 0x10;
-
-	u32 flags = (1 << 0 | /* instruction execute*/
-		     3 << 3 | /* ARM instruction */
-		     0 << 5 | /* No data value comparison */
-		     0 << 7 | /* No exact mach */
-		     0 << 8 | /* Ignore context ID */
-		     0 << 10); /* Security ignored */
-
-	/*
-	 * Initial configuration only - guarantees sources handled by
-	 * this driver have a unique ID at startup time but not between
-	 * all other types of sources.  For that we lean on the core
-	 * framework.
-	 */
-	drvdata->traceid = etm3x_traceid++;
-	drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
-	drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
-	if (drvdata->nr_addr_cmp >= 2) {
-		drvdata->addr_val[0] = (u32) _stext;
-		drvdata->addr_val[1] = (u32) _etext;
-		drvdata->addr_acctype[0] = flags;
-		drvdata->addr_acctype[1] = flags;
-		drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
-		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
-	}
-
-	etm_set_default(drvdata);
+	drvdata->traceid = coresight_get_trace_id(drvdata->cpu);
 }
 
 static int etm_probe(struct amba_device *adev, const struct amba_id *id)
@@ -1831,9 +802,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
 	get_online_cpus();
 	etmdrvdata[drvdata->cpu] = drvdata;
 
-	if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
-		drvdata->os_unlock = true;
-
 	if (smp_call_function_single(drvdata->cpu,
 				     etm_init_arch_data,  drvdata, 1))
 		dev_err(dev, "ETM arch init failed\n");
@@ -1847,7 +815,9 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
 		ret = -EINVAL;
 		goto err_arch_supported;
 	}
-	etm_init_default_data(drvdata);
+
+	etm_init_trace_id(drvdata);
+	etm_set_default(&drvdata->config);
 
 	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
 	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
@@ -1861,6 +831,12 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
 		goto err_arch_supported;
 	}
 
+	ret = etm_perf_symlink(drvdata->csdev, true);
+	if (ret) {
+		coresight_unregister(drvdata->csdev);
+		goto err_arch_supported;
+	}
+
 	pm_runtime_put(&adev->dev);
 	dev_info(dev, "%s initialized\n", (char *)id->data);
 
@@ -1877,17 +853,6 @@ err_arch_supported:
 	return ret;
 }
 
-static int etm_remove(struct amba_device *adev)
-{
-	struct etm_drvdata *drvdata = amba_get_drvdata(adev);
-
-	coresight_unregister(drvdata->csdev);
-	if (--etm_count == 0)
-		unregister_hotcpu_notifier(&etm_cpu_notifier);
-
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int etm_runtime_suspend(struct device *dev)
 {
@@ -1948,13 +913,9 @@ static struct amba_driver etm_driver = {
 		.name	= "coresight-etm3x",
 		.owner	= THIS_MODULE,
 		.pm	= &etm_dev_pm_ops,
+		.suppress_bind_attrs = true,
 	},
 	.probe		= etm_probe,
-	.remove		= etm_remove,
 	.id_table	= etm_ids,
 };
-
-module_amba_driver(etm_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");
+builtin_amba_driver(etm_driver);
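
The etm3x rework above replaces the old drvdata->enable boolean with a local_t mode word, so a sysfs user and a perf session cannot grab the same tracer at once. The sketch below condenses that claim/release pattern; the example_* names are invented for illustration, and CS_MODE_* refers to the cs_mode enum added to coresight-priv.h further down in this pull.

/*
 * Minimal sketch of the ownership pattern etm_enable()/etm_disable() now
 * follow.  Illustration only: example_* names are made up, CS_MODE_* is
 * the enum introduced in coresight-priv.h later in this diff.
 */
#include <asm/local.h>
#include <linux/errno.h>

struct example_source {
	local_t	mode;			/* holds a CS_MODE_* value */
};

static int example_claim(struct example_source *src, unsigned int new_mode)
{
	long old = local_cmpxchg(&src->mode, CS_MODE_DISABLED, new_mode);

	/* Anything other than DISABLED means sysfs or perf already owns it */
	if (old != CS_MODE_DISABLED)
		return -EBUSY;

	return 0;
}

static void example_release(struct example_source *src)
{
	/* Only the current owner gets here, so a plain store is enough */
	local_set(&src->mode, CS_MODE_DISABLED);
}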

+ 14 - 23
drivers/hwtracing/coresight/coresight-etm4x.c

@@ -15,7 +15,6 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
-#include <linux/module.h>
 #include <linux/io.h>
 #include <linux/err.h>
 #include <linux/fs.h>
@@ -32,6 +31,7 @@
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/pm_runtime.h>
+#include <linux/perf_event.h>
 #include <asm/sections.h>
 
 #include "coresight-etm4x.h"
@@ -63,6 +63,13 @@ static bool etm4_arch_supported(u8 arch)
 	return true;
 }
 
+static int etm4_cpu_id(struct coresight_device *csdev)
+{
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	return drvdata->cpu;
+}
+
 static int etm4_trace_id(struct coresight_device *csdev)
 {
 	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -72,7 +79,6 @@ static int etm4_trace_id(struct coresight_device *csdev)
 	if (!drvdata->enable)
 		return drvdata->trcid;
 
-	pm_runtime_get_sync(drvdata->dev);
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 
 	CS_UNLOCK(drvdata->base);
@@ -81,7 +87,6 @@ static int etm4_trace_id(struct coresight_device *csdev)
 	CS_LOCK(drvdata->base);
 
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
-	pm_runtime_put(drvdata->dev);
 
 	return trace_id;
 }
@@ -182,12 +187,12 @@ static void etm4_enable_hw(void *info)
 	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
 }
 
-static int etm4_enable(struct coresight_device *csdev)
+static int etm4_enable(struct coresight_device *csdev,
+		       struct perf_event_attr *attr, u32 mode)
 {
 	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 	int ret;
 
-	pm_runtime_get_sync(drvdata->dev);
 	spin_lock(&drvdata->spinlock);
 
 	/*
@@ -207,7 +212,6 @@ static int etm4_enable(struct coresight_device *csdev)
 	return 0;
 err:
 	spin_unlock(&drvdata->spinlock);
-	pm_runtime_put(drvdata->dev);
 	return ret;
 }
 
@@ -256,12 +260,11 @@ static void etm4_disable(struct coresight_device *csdev)
 	spin_unlock(&drvdata->spinlock);
 	put_online_cpus();
 
-	pm_runtime_put(drvdata->dev);
-
 	dev_info(drvdata->dev, "ETM tracing disabled\n");
 }
 
 static const struct coresight_ops_source etm4_source_ops = {
+	.cpu_id		= etm4_cpu_id,
 	.trace_id	= etm4_trace_id,
 	.enable		= etm4_enable,
 	.disable	= etm4_disable,
@@ -2219,7 +2222,7 @@ static ssize_t name##_show(struct device *_dev,				\
 	return scnprintf(buf, PAGE_SIZE, "0x%x\n",			\
 			 readl_relaxed(drvdata->base + offset));	\
 }									\
-DEVICE_ATTR_RO(name)
+static DEVICE_ATTR_RO(name)
 
 coresight_simple_func(trcoslsr, TRCOSLSR);
 coresight_simple_func(trcpdcr, TRCPDCR);
@@ -2684,17 +2687,6 @@ err_coresight_register:
 	return ret;
 }
 
-static int etm4_remove(struct amba_device *adev)
-{
-	struct etmv4_drvdata *drvdata = amba_get_drvdata(adev);
-
-	coresight_unregister(drvdata->csdev);
-	if (--etm4_count == 0)
-		unregister_hotcpu_notifier(&etm4_cpu_notifier);
-
-	return 0;
-}
-
 static struct amba_id etm4_ids[] = {
 	{       /* ETM 4.0 - Qualcomm */
 		.id	= 0x0003b95d,
@@ -2712,10 +2704,9 @@ static struct amba_id etm4_ids[] = {
 static struct amba_driver etm4x_driver = {
 	.drv = {
 		.name   = "coresight-etm4x",
+		.suppress_bind_attrs = true,
 	},
 	.probe		= etm4_probe,
-	.remove		= etm4_remove,
 	.id_table	= etm4_ids,
 };
-
-module_amba_driver(etm4x_driver);
+builtin_amba_driver(etm4x_driver);
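
Both tracers' enable callbacks now take a struct perf_event_attr and a mode; on etm3x the attr is folded into the hardware configuration by etm_parse_event_config() (exclude_kernel/exclude_user become ETM_MODE_EXCL_*, and attr.config may carry the cycle-accurate/timestamp bits). A userspace sketch of opening such an event follows; the "cs_etm" PMU name is an assumption, and the sink selection plus AUX-area mmap still required before any trace data appears are omitted.

/*
 * Userspace sketch of requesting an ETM trace session through perf.
 * Assumptions (not shown in the diff): the ETM PMU is published as
 * "cs_etm" in sysfs, and a sink plus an AUX-area mmap must still be set
 * up before trace data is produced.  Error handling is minimal.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	FILE *f;
	int type, fd;

	/* The PMU's dynamic type id is published in sysfs */
	f = fopen("/sys/bus/event_source/devices/cs_etm/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.exclude_kernel = 1;	/* parsed into ETM_MODE_EXCL_KERN */
	/* attr.config may carry the cycle-accurate/timestamp ETMCR options */

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* trace this thread */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* mmap the ring buffer and AUX area here before expecting trace data */
	close(fd);
	return 0;
}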

+ 4 - 17
drivers/hwtracing/coresight/coresight-funnel.c

@@ -1,4 +1,6 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Funnel driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,7 +13,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
@@ -69,7 +70,6 @@ static int funnel_enable(struct coresight_device *csdev, int inport,
 {
 	struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	pm_runtime_get_sync(drvdata->dev);
 	funnel_enable_hw(drvdata, inport);
 
 	dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
@@ -95,7 +95,6 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
 	struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
 	funnel_disable_hw(drvdata, inport);
-	pm_runtime_put(drvdata->dev);
 
 	dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
 }
@@ -226,14 +225,6 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
 	return 0;
 }
 
-static int funnel_remove(struct amba_device *adev)
-{
-	struct funnel_drvdata *drvdata = amba_get_drvdata(adev);
-
-	coresight_unregister(drvdata->csdev);
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int funnel_runtime_suspend(struct device *dev)
 {
@@ -273,13 +264,9 @@ static struct amba_driver funnel_driver = {
 		.name	= "coresight-funnel",
 		.owner	= THIS_MODULE,
 		.pm	= &funnel_dev_pm_ops,
+		.suppress_bind_attrs = true,
 	},
 	.probe		= funnel_probe,
-	.remove		= funnel_remove,
 	.id_table	= funnel_ids,
 };
-
-module_amba_driver(funnel_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Funnel driver");
+builtin_amba_driver(funnel_driver);
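
A pattern repeated across the funnel, replicators, ETMs, TMC and TPIU in this pull: the drivers become explicitly non-modular. <linux/module.h>, the .remove callbacks and the MODULE_* macros go away, module_amba_driver() becomes builtin_amba_driver(), and .suppress_bind_attrs keeps userspace from unbinding a device the driver can no longer tear down. A skeleton of the resulting shape, with placeholder names and an illustrative AMBA id:

/*
 * Skeleton of the builtin-only AMBA driver shape these patches converge
 * on.  "example" names and the id value are placeholders; only the
 * registration pattern matters.
 */
#include <linux/amba/bus.h>

static int example_probe(struct amba_device *adev, const struct amba_id *id)
{
	/* map resources, register with the coresight core, etc. */
	return 0;
}

static struct amba_id example_ids[] = {
	{ .id = 0x0003b961, .mask = 0x0003ffff },	/* illustrative only */
	{ 0, 0 },
};

static struct amba_driver example_driver = {
	.drv = {
		.name			= "coresight-example",
		/* no sysfs unbind: the driver can never be removed anyway */
		.suppress_bind_attrs	= true,
	},
	.probe		= example_probe,
	/* no .remove: the driver is builtin and never detaches */
	.id_table	= example_ids,
};
builtin_amba_driver(example_driver);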

+ 15 - 0
drivers/hwtracing/coresight/coresight-priv.h

@@ -34,6 +34,15 @@
 #define TIMEOUT_US		100
 #define BMVAL(val, lsb, msb)	((val & GENMASK(msb, lsb)) >> lsb)
 
+#define ETM_MODE_EXCL_KERN	BIT(30)
+#define ETM_MODE_EXCL_USER	BIT(31)
+
+enum cs_mode {
+	CS_MODE_DISABLED,
+	CS_MODE_SYSFS,
+	CS_MODE_PERF,
+};
+
 static inline void CS_LOCK(void __iomem *addr)
 {
 	do {
@@ -52,6 +61,12 @@ static inline void CS_UNLOCK(void __iomem *addr)
 	} while (0);
 }
 
+void coresight_disable_path(struct list_head *path);
+int coresight_enable_path(struct list_head *path, u32 mode);
+struct coresight_device *coresight_get_sink(struct list_head *path);
+struct list_head *coresight_build_path(struct coresight_device *csdev);
+void coresight_release_path(struct list_head *path);
+
 #ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
 extern int etm_readl_cp14(u32 off, unsigned int *val);
 extern int etm_writel_cp14(u32 off, u32 val);
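
coresight-priv.h gains the CS_MODE_* enum and a small path API that the new perf glue and the reworked sysfs path are expected to share. Only the prototypes are visible here, so the call sequence below is a hedged sketch of the intended lifecycle, not something lifted from the implementation; the failure convention of coresight_build_path() is also assumed.

/*
 * Hedged sketch of how a caller might drive the path helpers declared
 * above.  Signatures come from coresight-priv.h; the ordering and the
 * error convention are assumptions.
 */
#include <linux/coresight.h>
#include <linux/errno.h>

static int example_trace_session(struct coresight_device *source)
{
	struct list_head *path;
	int ret;

	/* Walk the connections from this source out to a sink */
	path = coresight_build_path(source);
	if (!path)		/* assumed failure convention */
		return -ENOMEM;

	/* Program every component along the path, here for a perf session */
	ret = coresight_enable_path(path, CS_MODE_PERF);
	if (ret)
		goto out;

	/* ... tracing runs; coresight_get_sink(path) locates the sink ... */

	coresight_disable_path(path);
out:
	coresight_release_path(path);
	return ret;
}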

+ 2 - 17
drivers/hwtracing/coresight/coresight-replicator-qcom.c

@@ -15,7 +15,6 @@
 #include <linux/clk.h>
 #include <linux/coresight.h>
 #include <linux/device.h>
-#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -48,8 +47,6 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
 {
 	struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	pm_runtime_get_sync(drvdata->dev);
-
 	CS_UNLOCK(drvdata->base);
 
 	/*
@@ -86,8 +83,6 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
 
 	CS_LOCK(drvdata->base);
 
-	pm_runtime_put(drvdata->dev);
-
 	dev_info(drvdata->dev, "REPLICATOR disabled\n");
 }
 
@@ -156,15 +151,6 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
 	return 0;
 }
 
-static int replicator_remove(struct amba_device *adev)
-{
-	struct replicator_state *drvdata = amba_get_drvdata(adev);
-
-	pm_runtime_disable(&adev->dev);
-	coresight_unregister(drvdata->csdev);
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int replicator_runtime_suspend(struct device *dev)
 {
@@ -206,10 +192,9 @@ static struct amba_driver replicator_driver = {
 	.drv = {
 		.name	= "coresight-replicator-qcom",
 		.pm	= &replicator_dev_pm_ops,
+		.suppress_bind_attrs = true,
 	},
 	.probe		= replicator_probe,
-	.remove		= replicator_remove,
 	.id_table	= replicator_ids,
 };
-
-module_amba_driver(replicator_driver);
+builtin_amba_driver(replicator_driver);

+ 3 - 22
drivers/hwtracing/coresight/coresight-replicator.c

@@ -1,4 +1,6 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Replicator driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,7 +13,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
@@ -41,7 +42,6 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
 {
 	struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	pm_runtime_get_sync(drvdata->dev);
 	dev_info(drvdata->dev, "REPLICATOR enabled\n");
 	return 0;
 }
@@ -51,7 +51,6 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
 {
 	struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	pm_runtime_put(drvdata->dev);
 	dev_info(drvdata->dev, "REPLICATOR disabled\n");
 }
 
@@ -127,20 +126,6 @@ out_disable_pm:
 	return ret;
 }
 
-static int replicator_remove(struct platform_device *pdev)
-{
-	struct replicator_drvdata *drvdata = platform_get_drvdata(pdev);
-
-	coresight_unregister(drvdata->csdev);
-	pm_runtime_get_sync(&pdev->dev);
-	if (!IS_ERR(drvdata->atclk))
-		clk_disable_unprepare(drvdata->atclk);
-	pm_runtime_put_noidle(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
-
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int replicator_runtime_suspend(struct device *dev)
 {
@@ -175,15 +160,11 @@ static const struct of_device_id replicator_match[] = {
 
 static struct platform_driver replicator_driver = {
 	.probe          = replicator_probe,
-	.remove         = replicator_remove,
 	.driver         = {
 		.name   = "coresight-replicator",
 		.of_match_table = replicator_match,
 		.pm	= &replicator_dev_pm_ops,
+		.suppress_bind_attrs = true,
 	},
 };
-
 builtin_platform_driver(replicator_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Replicator driver");

+ 7 - 28
drivers/hwtracing/coresight/coresight-tmc.c

@@ -1,4 +1,6 @@
 /* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Trace Memory Controller driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,7 +13,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
@@ -124,7 +125,7 @@ struct tmc_drvdata {
 	bool			reading;
 	char			*buf;
 	dma_addr_t		paddr;
-	void __iomem		*vaddr;
+	void			*vaddr;
 	u32			size;
 	bool			enable;
 	enum tmc_config_type	config_type;
@@ -242,12 +243,9 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
 {
 	unsigned long flags;
 
-	pm_runtime_get_sync(drvdata->dev);
-
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 	if (drvdata->reading) {
 		spin_unlock_irqrestore(&drvdata->spinlock, flags);
-		pm_runtime_put(drvdata->dev);
 		return -EBUSY;
 	}
 
@@ -268,7 +266,7 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
 	return 0;
 }
 
-static int tmc_enable_sink(struct coresight_device *csdev)
+static int tmc_enable_sink(struct coresight_device *csdev, u32 mode)
 {
 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
@@ -381,8 +379,6 @@ out:
 	drvdata->enable = false;
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-	pm_runtime_put(drvdata->dev);
-
 	dev_info(drvdata->dev, "TMC disabled\n");
 }
 
@@ -766,23 +762,10 @@ err_misc_register:
 err_devm_kzalloc:
 	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
 		dma_free_coherent(dev, drvdata->size,
-				&drvdata->paddr, GFP_KERNEL);
+				drvdata->vaddr, drvdata->paddr);
 	return ret;
 }
 
-static int tmc_remove(struct amba_device *adev)
-{
-	struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
-
-	misc_deregister(&drvdata->miscdev);
-	coresight_unregister(drvdata->csdev);
-	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
-		dma_free_coherent(drvdata->dev, drvdata->size,
-				  &drvdata->paddr, GFP_KERNEL);
-
-	return 0;
-}
-
 static struct amba_id tmc_ids[] = {
 	{
 		.id     = 0x0003b961,
@@ -795,13 +778,9 @@ static struct amba_driver tmc_driver = {
 	.drv = {
 		.name   = "coresight-tmc",
 		.owner  = THIS_MODULE,
+		.suppress_bind_attrs = true,
 	},
 	.probe		= tmc_probe,
-	.remove		= tmc_remove,
 	.id_table	= tmc_ids,
 };
-
-module_amba_driver(tmc_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Trace Memory Controller driver");
+builtin_amba_driver(tmc_driver);

+ 5 - 18
drivers/hwtracing/coresight/coresight-tpiu.c

@@ -1,4 +1,6 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Trace Port Interface Unit driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,7 +13,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/io.h>
@@ -70,11 +71,10 @@ static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
 	CS_LOCK(drvdata->base);
 }
 
-static int tpiu_enable(struct coresight_device *csdev)
+static int tpiu_enable(struct coresight_device *csdev, u32 mode)
 {
 	struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	pm_runtime_get_sync(csdev->dev.parent);
 	tpiu_enable_hw(drvdata);
 
 	dev_info(drvdata->dev, "TPIU enabled\n");
@@ -98,7 +98,6 @@ static void tpiu_disable(struct coresight_device *csdev)
 	struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
 	tpiu_disable_hw(drvdata);
-	pm_runtime_put(csdev->dev.parent);
 
 	dev_info(drvdata->dev, "TPIU disabled\n");
 }
@@ -172,14 +171,6 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
 	return 0;
 }
 
-static int tpiu_remove(struct amba_device *adev)
-{
-	struct tpiu_drvdata *drvdata = amba_get_drvdata(adev);
-
-	coresight_unregister(drvdata->csdev);
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int tpiu_runtime_suspend(struct device *dev)
 {
@@ -223,13 +214,9 @@ static struct amba_driver tpiu_driver = {
 		.name	= "coresight-tpiu",
 		.owner	= THIS_MODULE,
 		.pm	= &tpiu_dev_pm_ops,
+		.suppress_bind_attrs = true,
 	},
 	.probe		= tpiu_probe,
-	.remove		= tpiu_remove,
 	.id_table	= tpiu_ids,
 };
-
-module_amba_driver(tpiu_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Trace Port Interface Unit driver");
+builtin_amba_driver(tpiu_driver);

+ 279 - 109
drivers/hwtracing/coresight/coresight.c

@@ -11,7 +11,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
@@ -24,11 +23,28 @@
 #include <linux/coresight.h>
 #include <linux/of_platform.h>
 #include <linux/delay.h>
+#include <linux/pm_runtime.h>
 
 #include "coresight-priv.h"
 
 static DEFINE_MUTEX(coresight_mutex);
 
+/**
+ * struct coresight_node - elements of a path, from source to sink
+ * @csdev:	Address of an element.
+ * @link:	hook to the list.
+ */
+struct coresight_node {
+	struct coresight_device *csdev;
+	struct list_head link;
+};
+
+/*
+ * When operating Coresight drivers from the sysFS interface, only a single
+ * path can exist from a tracer (associated to a CPU) to a sink.
+ */
+static DEFINE_PER_CPU(struct list_head *, sysfs_path);
+
 static int coresight_id_match(struct device *dev, void *data)
 {
 	int trace_id, i_trace_id;
@@ -68,15 +84,12 @@ static int coresight_source_is_unique(struct coresight_device *csdev)
 				 csdev, coresight_id_match);
 }
 
-static int coresight_find_link_inport(struct coresight_device *csdev)
+static int coresight_find_link_inport(struct coresight_device *csdev,
+				      struct coresight_device *parent)
 {
 	int i;
-	struct coresight_device *parent;
 	struct coresight_connection *conn;
 
-	parent = container_of(csdev->path_link.next,
-			      struct coresight_device, path_link);
-
 	for (i = 0; i < parent->nr_outport; i++) {
 		conn = &parent->conns[i];
 		if (conn->child_dev == csdev)
@@ -89,15 +102,12 @@ static int coresight_find_link_inport(struct coresight_device *csdev)
 	return 0;
 }
 
-static int coresight_find_link_outport(struct coresight_device *csdev)
+static int coresight_find_link_outport(struct coresight_device *csdev,
+				       struct coresight_device *child)
 {
 	int i;
-	struct coresight_device *child;
 	struct coresight_connection *conn;
 
-	child = container_of(csdev->path_link.prev,
-			     struct coresight_device, path_link);
-
 	for (i = 0; i < csdev->nr_outport; i++) {
 		conn = &csdev->conns[i];
 		if (conn->child_dev == child)
@@ -110,13 +120,13 @@ static int coresight_find_link_outport(struct coresight_device *csdev)
 	return 0;
 }
 
-static int coresight_enable_sink(struct coresight_device *csdev)
+static int coresight_enable_sink(struct coresight_device *csdev, u32 mode)
 {
 	int ret;
 
 	if (!csdev->enable) {
 		if (sink_ops(csdev)->enable) {
-			ret = sink_ops(csdev)->enable(csdev);
+			ret = sink_ops(csdev)->enable(csdev, mode);
 			if (ret)
 				return ret;
 		}
@@ -138,14 +148,19 @@ static void coresight_disable_sink(struct coresight_device *csdev)
 	}
 }
 
-static int coresight_enable_link(struct coresight_device *csdev)
+static int coresight_enable_link(struct coresight_device *csdev,
+				 struct coresight_device *parent,
+				 struct coresight_device *child)
 {
 	int ret;
 	int link_subtype;
 	int refport, inport, outport;
 
-	inport = coresight_find_link_inport(csdev);
-	outport = coresight_find_link_outport(csdev);
+	if (!parent || !child)
+		return -EINVAL;
+
+	inport = coresight_find_link_inport(csdev, parent);
+	outport = coresight_find_link_outport(csdev, child);
 	link_subtype = csdev->subtype.link_subtype;
 
 	if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
@@ -168,14 +183,19 @@ static int coresight_enable_link(struct coresight_device *csdev)
 	return 0;
 }
 
-static void coresight_disable_link(struct coresight_device *csdev)
+static void coresight_disable_link(struct coresight_device *csdev,
+				   struct coresight_device *parent,
+				   struct coresight_device *child)
 {
 	int i, nr_conns;
 	int link_subtype;
 	int refport, inport, outport;
 
-	inport = coresight_find_link_inport(csdev);
-	outport = coresight_find_link_outport(csdev);
+	if (!parent || !child)
+		return;
+
+	inport = coresight_find_link_inport(csdev, parent);
+	outport = coresight_find_link_outport(csdev, child);
 	link_subtype = csdev->subtype.link_subtype;
 
 	if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
@@ -201,7 +221,7 @@ static void coresight_disable_link(struct coresight_device *csdev)
 	csdev->enable = false;
 }
 
-static int coresight_enable_source(struct coresight_device *csdev)
+static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
 {
 	int ret;
 
@@ -213,7 +233,7 @@ static int coresight_enable_source(struct coresight_device *csdev)
 
 	if (!csdev->enable) {
 		if (source_ops(csdev)->enable) {
-			ret = source_ops(csdev)->enable(csdev);
+			ret = source_ops(csdev)->enable(csdev, NULL, mode);
 			if (ret)
 				return ret;
 		}
@@ -235,109 +255,188 @@ static void coresight_disable_source(struct coresight_device *csdev)
 	}
 }
 
-static int coresight_enable_path(struct list_head *path)
+void coresight_disable_path(struct list_head *path)
 {
-	int ret = 0;
-	struct coresight_device *cd;
-
-	/*
-	 * At this point we have a full @path, from source to sink.  The
-	 * sink is the first entry and the source the last one.  Go through
-	 * all the components and enable them one by one.
-	 */
-	list_for_each_entry(cd, path, path_link) {
-		if (cd == list_first_entry(path, struct coresight_device,
-					   path_link)) {
-			ret = coresight_enable_sink(cd);
-		} else if (list_is_last(&cd->path_link, path)) {
-			/*
-			 * Don't enable the source just yet - this needs to
-			 * happen at the very end when all links and sink
-			 * along the path have been configured properly.
-			 */
-			;
-		} else {
-			ret = coresight_enable_link(cd);
+	struct coresight_node *nd;
+	struct coresight_device *csdev, *parent, *child;
+
+	list_for_each_entry(nd, path, link) {
+		csdev = nd->csdev;
+
+		switch (csdev->type) {
+		case CORESIGHT_DEV_TYPE_SINK:
+		case CORESIGHT_DEV_TYPE_LINKSINK:
+			coresight_disable_sink(csdev);
+			break;
+		case CORESIGHT_DEV_TYPE_SOURCE:
+			/* sources are disabled from either sysFS or Perf */
+			break;
+		case CORESIGHT_DEV_TYPE_LINK:
+			parent = list_prev_entry(nd, link)->csdev;
+			child = list_next_entry(nd, link)->csdev;
+			coresight_disable_link(csdev, parent, child);
+			break;
+		default:
+			break;
 		}
-		if (ret)
-			goto err;
 	}
+}
 
-	return 0;
-err:
-	list_for_each_entry_continue_reverse(cd, path, path_link) {
-		if (cd == list_first_entry(path, struct coresight_device,
-					   path_link)) {
-			coresight_disable_sink(cd);
-		} else if (list_is_last(&cd->path_link, path)) {
-			;
-		} else {
-			coresight_disable_link(cd);
+int coresight_enable_path(struct list_head *path, u32 mode)
+{
+
+	int ret = 0;
+	struct coresight_node *nd;
+	struct coresight_device *csdev, *parent, *child;
+
+	list_for_each_entry_reverse(nd, path, link) {
+		csdev = nd->csdev;
+
+		switch (csdev->type) {
+		case CORESIGHT_DEV_TYPE_SINK:
+		case CORESIGHT_DEV_TYPE_LINKSINK:
+			ret = coresight_enable_sink(csdev, mode);
+			if (ret)
+				goto err;
+			break;
+		case CORESIGHT_DEV_TYPE_SOURCE:
+			/* sources are enabled from either sysFS or Perf */
+			break;
+		case CORESIGHT_DEV_TYPE_LINK:
+			parent = list_prev_entry(nd, link)->csdev;
+			child = list_next_entry(nd, link)->csdev;
+			ret = coresight_enable_link(csdev, parent, child);
+			if (ret)
+				goto err;
+			break;
+		default:
+			goto err;
 		}
 	}
 
+out:
 	return ret;
+err:
+	coresight_disable_path(path);
+	goto out;
 }
 
-static int coresight_disable_path(struct list_head *path)
+struct coresight_device *coresight_get_sink(struct list_head *path)
 {
-	struct coresight_device *cd;
+	struct coresight_device *csdev;
 
-	list_for_each_entry_reverse(cd, path, path_link) {
-		if (cd == list_first_entry(path, struct coresight_device,
-					   path_link)) {
-			coresight_disable_sink(cd);
-		} else if (list_is_last(&cd->path_link, path)) {
-			/*
-			 * The source has already been stopped, no need
-			 * to do it again here.
-			 */
-			;
-		} else {
-			coresight_disable_link(cd);
+	if (!path)
+		return NULL;
+
+	csdev = list_last_entry(path, struct coresight_node, link)->csdev;
+	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
+	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
+		return NULL;
+
+	return csdev;
+}
+
+/**
+ * _coresight_build_path - recursively build a path from a @csdev to a sink.
+ * @csdev:	The device to start from.
+ * @path:	The list to add devices to.
+ *
+ * The tree of Coresight devices is traversed until an activated sink is
+ * found.  From there the sink is added to the list along with all the
+ * devices that led to that point - the end result is a list from source
+ * to sink. In that list the source is the first device and the sink the
+ * last one.
+ */
+static int _coresight_build_path(struct coresight_device *csdev,
+				 struct list_head *path)
+{
+	int i;
+	bool found = false;
+	struct coresight_node *node;
+	struct coresight_connection *conn;
+
+	/* An activated sink has been found.  Enqueue the element */
+	if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+	     csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && csdev->activated)
+		goto out;
+
+	/* Not a sink - recursively explore each port found on this element */
+	for (i = 0; i < csdev->nr_outport; i++) {
+		conn = &csdev->conns[i];
+		if (_coresight_build_path(conn->child_dev, path) == 0) {
+			found = true;
+			break;
 		}
 	}
 
+	if (!found)
+		return -ENODEV;
+
+out:
+	/*
+	 * A path from this element to a sink has been found.  The elements
+	 * leading to the sink are already enqueued, all that is left to do
+	 * is tell the PM runtime core we need this element and add a node
+	 * for it.
+	 */
+	node = kzalloc(sizeof(struct coresight_node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	node->csdev = csdev;
+	list_add(&node->link, path);
+	pm_runtime_get_sync(csdev->dev.parent);
+
 	return 0;
 }
 
-static int coresight_build_paths(struct coresight_device *csdev,
-				 struct list_head *path,
-				 bool enable)
+struct list_head *coresight_build_path(struct coresight_device *csdev)
 {
-	int i, ret = -EINVAL;
-	struct coresight_connection *conn;
+	struct list_head *path;
 
-	list_add(&csdev->path_link, path);
+	path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+	if (!path)
+		return NULL;
 
-	if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
-	    csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
-	    csdev->activated) {
-		if (enable)
-			ret = coresight_enable_path(path);
-		else
-			ret = coresight_disable_path(path);
-	} else {
-		for (i = 0; i < csdev->nr_outport; i++) {
-			conn = &csdev->conns[i];
-			if (coresight_build_paths(conn->child_dev,
-						    path, enable) == 0)
-				ret = 0;
-		}
+	INIT_LIST_HEAD(path);
+
+	if (_coresight_build_path(csdev, path)) {
+		kfree(path);
+		path = NULL;
 	}
 
-	if (list_first_entry(path, struct coresight_device, path_link) != csdev)
-		dev_err(&csdev->dev, "wrong device in %s\n", __func__);
+	return path;
+}
 
-	list_del(&csdev->path_link);
+/**
+ * coresight_release_path - release a previously built path.
+ * @path:	the path to release.
+ *
+ * Go through all the elements of a path and 1) remove them from the list and
+ * 2) free the memory allocated for each node.
+ */
+void coresight_release_path(struct list_head *path)
+{
+	struct coresight_device *csdev;
+	struct coresight_node *nd, *next;
 
-	return ret;
+	list_for_each_entry_safe(nd, next, path, link) {
+		csdev = nd->csdev;
+
+		pm_runtime_put_sync(csdev->dev.parent);
+		list_del(&nd->link);
+		kfree(nd);
+	}
+
+	kfree(path);
+	path = NULL;
 }
 
 int coresight_enable(struct coresight_device *csdev)
 {
 	int ret = 0;
-	LIST_HEAD(path);
+	int cpu;
+	struct list_head *path;
 
 	mutex_lock(&coresight_mutex);
 	if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
@@ -348,22 +447,47 @@ int coresight_enable(struct coresight_device *csdev)
 	if (csdev->enable)
 		goto out;
 
-	if (coresight_build_paths(csdev, &path, true)) {
-		dev_err(&csdev->dev, "building path(s) failed\n");
+	path = coresight_build_path(csdev);
+	if (!path) {
+		pr_err("building path(s) failed\n");
 		goto out;
 	}
 
-	if (coresight_enable_source(csdev))
-		dev_err(&csdev->dev, "source enable failed\n");
+	ret = coresight_enable_path(path, CS_MODE_SYSFS);
+	if (ret)
+		goto err_path;
+
+	ret = coresight_enable_source(csdev, CS_MODE_SYSFS);
+	if (ret)
+		goto err_source;
+
+	/*
+	 * When working from sysFS it is important to keep track
+	 * of the paths that were created so that they can be
+	 * undone in 'coresight_disable()'.  Since there can only
+	 * be a single session per tracer (when working from sysFS)
+	 * a per-cpu variable will do just fine.
+	 */
+	cpu = source_ops(csdev)->cpu_id(csdev);
+	per_cpu(sysfs_path, cpu) = path;
+
 out:
 	mutex_unlock(&coresight_mutex);
 	return ret;
+
+err_source:
+	coresight_disable_path(path);
+
+err_path:
+	coresight_release_path(path);
+	goto out;
 }
 EXPORT_SYMBOL_GPL(coresight_enable);
 
 void coresight_disable(struct coresight_device *csdev)
 {
-	LIST_HEAD(path);
+	int cpu;
+	struct list_head *path;
 
 	mutex_lock(&coresight_mutex);
 	if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
@@ -373,9 +497,12 @@ void coresight_disable(struct coresight_device *csdev)
 	if (!csdev->enable)
 		goto out;
 
+	cpu = source_ops(csdev)->cpu_id(csdev);
+	path = per_cpu(sysfs_path, cpu);
 	coresight_disable_source(csdev);
-	if (coresight_build_paths(csdev, &path, false))
-		dev_err(&csdev->dev, "releasing path(s) failed\n");
+	coresight_disable_path(path);
+	coresight_release_path(path);
+	per_cpu(sysfs_path, cpu) = NULL;
 
 out:
 	mutex_unlock(&coresight_mutex);
@@ -481,6 +608,8 @@ static void coresight_device_release(struct device *dev)
 {
 	struct coresight_device *csdev = to_coresight_device(dev);
 
+	kfree(csdev->conns);
+	kfree(csdev->refcnt);
 	kfree(csdev);
 }
 
@@ -536,7 +665,7 @@ static void coresight_fixup_orphan_conns(struct coresight_device *csdev)
 	 * are hooked-up with each newly added component.
 	 */
 	bus_for_each_dev(&coresight_bustype, NULL,
-				 csdev, coresight_orphan_match);
+			 csdev, coresight_orphan_match);
 }
 
 
@@ -568,6 +697,8 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
 
 		if (dev) {
 			conn->child_dev = to_coresight_device(dev);
+			/* and put reference from 'bus_find_device()' */
+			put_device(dev);
 		} else {
 			csdev->orphan = true;
 			conn->child_dev = NULL;
@@ -575,6 +706,50 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
 	}
 }
 
+static int coresight_remove_match(struct device *dev, void *data)
+{
+	int i;
+	struct coresight_device *csdev, *iterator;
+	struct coresight_connection *conn;
+
+	csdev = data;
+	iterator = to_coresight_device(dev);
+
+	/* No need to check oneself */
+	if (csdev == iterator)
+		return 0;
+
+	/*
+	 * Circle through all the connections of that component.  If we find
+	 * a connection whose name matches @csdev, remove it.
+	 */
+	for (i = 0; i < iterator->nr_outport; i++) {
+		conn = &iterator->conns[i];
+
+		if (conn->child_dev == NULL)
+			continue;
+
+		if (!strcmp(dev_name(&csdev->dev), conn->child_name)) {
+			iterator->orphan = true;
+			conn->child_dev = NULL;
+			/* No need to continue */
+			break;
+		}
+	}
+
+	/*
+	 * Returning '0' ensures that all known components on the
+	 * bus will be checked.
+	 */
+	return 0;
+}
+
+static void coresight_remove_conns(struct coresight_device *csdev)
+{
+	bus_for_each_dev(&coresight_bustype, NULL,
+			 csdev, coresight_remove_match);
+}
+
 /**
  * coresight_timeout - loop until a bit has changed to a specific state.
  * @addr: base address of the area of interest.
@@ -713,13 +888,8 @@ EXPORT_SYMBOL_GPL(coresight_register);
 
 void coresight_unregister(struct coresight_device *csdev)
 {
-	mutex_lock(&coresight_mutex);
-
-	kfree(csdev->conns);
+	/* Remove references of that device in the topology */
+	coresight_remove_conns(csdev);
 	device_unregister(&csdev->dev);
-
-	mutex_unlock(&coresight_mutex);
 }
 EXPORT_SYMBOL_GPL(coresight_unregister);
-
-MODULE_LICENSE("GPL v2");
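The coresight.c rework above replaces the path implicitly threaded through each device (path_link) with explicit lists of coresight_node that are built once by coresight_build_path(), enabled sink-first by coresight_enable_path(), tracked per CPU for sysfs sessions, and freed by coresight_release_path(). A stand-alone model of the build step, assuming a simplified device type rather than the kernel's struct coresight_device, shows how prepending as the recursion unwinds yields a source-first, sink-last list:

#include <stdio.h>
#include <stdlib.h>

struct dev {
	const char *name;
	int is_sink, activated;
	struct dev *child[2];
	int nr_child;
};

struct node {
	struct dev *d;
	struct node *next;	/* stands in for the kernel's list_head */
};

/* Walk outports depth-first until an activated sink is found, then
 * prepend each device on the way back up, as _coresight_build_path()
 * does with list_add().
 */
static int build_path(struct dev *d, struct node **path)
{
	struct node *n;
	int i;

	if (d->is_sink && d->activated)
		goto out;

	for (i = 0; i < d->nr_child; i++)
		if (build_path(d->child[i], path) == 0)
			goto out;

	return -1;		/* no activated sink below this device */
out:
	n = malloc(sizeof(*n));
	if (!n)
		return -1;
	n->d = d;
	n->next = *path;	/* prepend: source ends up first, sink last */
	*path = n;
	return 0;
}

int main(void)
{
	struct dev sink   = { "tmc-etr", 1, 1, { NULL }, 0 };
	struct dev funnel = { "funnel",  0, 0, { &sink }, 1 };
	struct dev source = { "etm0",    0, 0, { &funnel }, 1 };
	struct node *path = NULL, *n;

	if (build_path(&source, &path) == 0)
		for (n = path; n; n = n->next)
			printf("%s\n", n->d->name);	/* etm0, funnel, tmc-etr */
	return 0;
}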

+ 1 - 2
drivers/hwtracing/coresight/of_coresight.c

@@ -10,7 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/err.h>
 #include <linux/slab.h>
@@ -86,7 +85,7 @@ static int of_coresight_alloc_memory(struct device *dev,
 		return -ENOMEM;
 
 	/* Children connected to this component via @outports */
-	 pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
+	pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
 					  sizeof(*pdata->child_names),
 					  GFP_KERNEL);
 	if (!pdata->child_names)

+ 1 - 0
drivers/hwtracing/intel_th/Kconfig

@@ -1,5 +1,6 @@
 config INTEL_TH
 	tristate "Intel(R) Trace Hub controller"
+	depends on HAS_DMA && HAS_IOMEM
 	help
 	  Intel(R) Trace Hub (TH) is a set of hardware blocks (subdevices) that
 	  produce, switch and output trace data from multiple hardware and

+ 27 - 3
drivers/hwtracing/intel_th/core.c

@@ -124,17 +124,34 @@ static struct device_type intel_th_source_device_type = {
 	.release	= intel_th_device_release,
 };
 
+static struct intel_th *to_intel_th(struct intel_th_device *thdev)
+{
+	/*
+	 * subdevice tree is flat: if this one is not a switch, its
+	 * parent must be
+	 */
+	if (thdev->type != INTEL_TH_SWITCH)
+		thdev = to_intel_th_hub(thdev);
+
+	if (WARN_ON_ONCE(!thdev || thdev->type != INTEL_TH_SWITCH))
+		return NULL;
+
+	return dev_get_drvdata(thdev->dev.parent);
+}
+
 static char *intel_th_output_devnode(struct device *dev, umode_t *mode,
 				     kuid_t *uid, kgid_t *gid)
 {
 	struct intel_th_device *thdev = to_intel_th_device(dev);
+	struct intel_th *th = to_intel_th(thdev);
 	char *node;
 
 	if (thdev->id >= 0)
-		node = kasprintf(GFP_KERNEL, "intel_th%d/%s%d", 0, thdev->name,
-				 thdev->id);
+		node = kasprintf(GFP_KERNEL, "intel_th%d/%s%d", th->id,
+				 thdev->name, thdev->id);
 	else
-		node = kasprintf(GFP_KERNEL, "intel_th%d/%s", 0, thdev->name);
+		node = kasprintf(GFP_KERNEL, "intel_th%d/%s", th->id,
+				 thdev->name);
 
 	return node;
 }
@@ -319,6 +336,7 @@ static struct intel_th_subdevice {
 	unsigned		nres;
 	unsigned		type;
 	unsigned		otype;
+	unsigned		scrpd;
 	int			id;
 } intel_th_subdevices[TH_SUBDEVICE_MAX] = {
 	{
@@ -352,6 +370,7 @@ static struct intel_th_subdevice {
 		.id	= 0,
 		.type	= INTEL_TH_OUTPUT,
 		.otype	= GTH_MSU,
+		.scrpd	= SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC0_IS_ENABLED,
 	},
 	{
 		.nres	= 2,
@@ -371,6 +390,7 @@ static struct intel_th_subdevice {
 		.id	= 1,
 		.type	= INTEL_TH_OUTPUT,
 		.otype	= GTH_MSU,
+		.scrpd	= SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC1_IS_ENABLED,
 	},
 	{
 		.nres	= 2,
@@ -403,6 +423,7 @@ static struct intel_th_subdevice {
 		.name	= "pti",
 		.type	= INTEL_TH_OUTPUT,
 		.otype	= GTH_PTI,
+		.scrpd	= SCRPD_PTI_IS_PRIM_DEST,
 	},
 	{
 		.nres	= 1,
@@ -477,6 +498,7 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
 			thdev->dev.devt = MKDEV(th->major, i);
 			thdev->output.type = subdev->otype;
 			thdev->output.port = -1;
+			thdev->output.scratchpad = subdev->scrpd;
 		}
 
 		err = device_add(&thdev->dev);
@@ -579,6 +601,8 @@ intel_th_alloc(struct device *dev, struct resource *devres,
 	}
 	th->dev = dev;
 
+	dev_set_drvdata(dev, th);
+
 	err = intel_th_populate(th, devres, ndevres, irq);
 	if (err)
 		goto err_chrdev;

+ 13 - 19
drivers/hwtracing/intel_th/gth.c

@@ -146,24 +146,6 @@ gth_master_set(struct gth_device *gth, unsigned int master, int port)
 	iowrite32(val, gth->base + reg);
 }
 
-/*static int gth_master_get(struct gth_device *gth, unsigned int master)
-{
-	unsigned int reg = REG_GTH_SWDEST0 + ((master >> 1) & ~3u);
-	unsigned int shift = (master & 0x7) * 4;
-	u32 val;
-
-	if (master >= 256) {
-		reg = REG_GTH_GSWTDEST;
-		shift = 0;
-	}
-
-	val = ioread32(gth->base + reg);
-	val &= (0xf << shift);
-	val >>= shift;
-
-	return val ? val & 0x7 : -1;
-	}*/
-
 static ssize_t master_attr_show(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
@@ -304,6 +286,10 @@ static int intel_th_gth_reset(struct gth_device *gth)
 	if (scratchpad & SCRPD_DEBUGGER_IN_USE)
 		return -EBUSY;
 
+	/* Always save/restore STH and TU registers in S0ix entry/exit */
+	scratchpad |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED;
+	iowrite32(scratchpad, gth->base + REG_GTH_SCRPD0);
+
 	/* output ports */
 	for (port = 0; port < 8; port++) {
 		if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) ==
@@ -506,6 +492,10 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
 	if (!count)
 		dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n",
 			output->port);
+
+	reg = ioread32(gth->base + REG_GTH_SCRPD0);
+	reg &= ~output->scratchpad;
+	iowrite32(reg, gth->base + REG_GTH_SCRPD0);
 }
 
 /**
@@ -520,7 +510,7 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
 				struct intel_th_output *output)
 {
 	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
-	u32 scr = 0xfc0000;
+	u32 scr = 0xfc0000, scrpd;
 	int master;
 
 	spin_lock(&gth->gth_lock);
@@ -535,6 +525,10 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
 	output->active = true;
 	spin_unlock(&gth->gth_lock);
 
+	scrpd = ioread32(gth->base + REG_GTH_SCRPD0);
+	scrpd |= output->scratchpad;
+	iowrite32(scrpd, gth->base + REG_GTH_SCRPD0);
+
 	iowrite32(scr, gth->base + REG_GTH_SCR);
 	iowrite32(0, gth->base + REG_GTH_SCR2);
 }

+ 0 - 3
drivers/hwtracing/intel_th/gth.h

@@ -57,9 +57,6 @@ enum {
 	REG_GTH_SCRPD3		= 0xec, /* ScratchPad[3] */
 };
 
-/* Externall debugger is using Intel TH */
-#define SCRPD_DEBUGGER_IN_USE	BIT(24)
-
 /* waiting for Pipeline Empty bit(s) to assert for GTH */
 #define GTH_PLE_WAITLOOP_DEPTH	10000
 

+ 41 - 0
drivers/hwtracing/intel_th/intel_th.h

@@ -30,6 +30,7 @@ enum {
  * struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices
  * @port:	output port number, assigned by the switch
  * @type:	GTH_{MSU,CTP,PTI}
+ * @scratchpad:	scratchpad bits to flag when this output is enabled
  * @multiblock:	true for multiblock output configuration
  * @active:	true when this output is enabled
  *
@@ -41,6 +42,7 @@ enum {
 struct intel_th_output {
 	int		port;
 	unsigned int	type;
+	unsigned int	scratchpad;
 	bool		multiblock;
 	bool		active;
 };
@@ -241,4 +243,43 @@ enum {
 	GTH_PTI = 4,	/* MIPI-PTI */
 };
 
+/*
+ * Scratchpad bits: tell firmware and external debuggers
+ * what we are up to.
+ */
+enum {
+	/* Memory is the primary destination */
+	SCRPD_MEM_IS_PRIM_DEST		= BIT(0),
+	/* XHCI DbC is the primary destination */
+	SCRPD_DBC_IS_PRIM_DEST		= BIT(1),
+	/* PTI is the primary destination */
+	SCRPD_PTI_IS_PRIM_DEST		= BIT(2),
+	/* BSSB is the primary destination */
+	SCRPD_BSSB_IS_PRIM_DEST		= BIT(3),
+	/* PTI is the alternate destination */
+	SCRPD_PTI_IS_ALT_DEST		= BIT(4),
+	/* BSSB is the alternate destination */
+	SCRPD_BSSB_IS_ALT_DEST		= BIT(5),
+	/* DeepSx exit occurred */
+	SCRPD_DEEPSX_EXIT		= BIT(6),
+	/* S4 exit occurred */
+	SCRPD_S4_EXIT			= BIT(7),
+	/* S5 exit occurred */
+	SCRPD_S5_EXIT			= BIT(8),
+	/* MSU controller 0/1 is enabled */
+	SCRPD_MSC0_IS_ENABLED		= BIT(9),
+	SCRPD_MSC1_IS_ENABLED		= BIT(10),
+	/* Sx exit occurred */
+	SCRPD_SX_EXIT			= BIT(11),
+	/* Trigger Unit is enabled */
+	SCRPD_TRIGGER_IS_ENABLED	= BIT(12),
+	SCRPD_ODLA_IS_ENABLED		= BIT(13),
+	SCRPD_SOCHAP_IS_ENABLED		= BIT(14),
+	SCRPD_STH_IS_ENABLED		= BIT(15),
+	SCRPD_DCIH_IS_ENABLED		= BIT(16),
+	SCRPD_VER_IS_ENABLED		= BIT(17),
+	/* External debugger is using Intel TH */
+	SCRPD_DEBUGGER_IN_USE		= BIT(24),
+};
+
 #endif
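The new SCRPD_* enum pairs with the gth.c hunks above: each output advertises the scratchpad bits it owns, and the GTH ORs them into SCRPD0 on enable and clears them again on disable so firmware and an external debugger can see what the Trace Hub is doing. A trivial user-space model of that flag bookkeeping (the real code uses ioread32()/iowrite32() on the SCRPD0 register):

#include <stdio.h>

#define SCRPD_MEM_IS_PRIM_DEST	(1u << 0)
#define SCRPD_MSC0_IS_ENABLED	(1u << 9)

static unsigned int scrpd0;		/* stands in for REG_GTH_SCRPD0 */

static void output_enable(unsigned int scratchpad)
{
	scrpd0 |= scratchpad;		/* as in intel_th_gth_enable() */
}

static void output_disable(unsigned int scratchpad)
{
	scrpd0 &= ~scratchpad;		/* as in intel_th_gth_disable() */
}

int main(void)
{
	unsigned int msc0 = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC0_IS_ENABLED;

	output_enable(msc0);
	printf("enabled:  %#x\n", scrpd0);
	output_disable(msc0);
	printf("disabled: %#x\n", scrpd0);
	return 0;
}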

+ 4 - 5
drivers/hwtracing/intel_th/msu.c

@@ -408,7 +408,7 @@ msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
 		 * Second time (wrap_count==1), it's just like any other block,
 		 * containing data in the range of [MSC_BDESC..data_bytes].
 		 */
-		if (iter->block == iter->start_block && iter->wrap_count) {
+		if (iter->block == iter->start_block && iter->wrap_count == 2) {
 			tocopy = DATA_IN_PAGE - data_bytes;
 			src += data_bytes;
 		}
@@ -1112,12 +1112,11 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
 		size = msc->nr_pages << PAGE_SHIFT;
 
 	if (!size)
-		return 0;
+		goto put_count;
 
-	if (off >= size) {
-		len = 0;
+	if (off >= size)
 		goto put_count;
-	}
+
 	if (off + len >= size)
 		len = size - off;
 

+ 10 - 2
drivers/hwtracing/intel_th/pci.c

@@ -46,8 +46,6 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
 	if (IS_ERR(th))
 		return PTR_ERR(th);
 
-	pci_set_drvdata(pdev, th);
-
 	return 0;
 }
 
@@ -67,6 +65,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa126),
 		.driver_data = (kernel_ulong_t)0,
 	},
+	{
+		/* Apollo Lake */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a8e),
+		.driver_data = (kernel_ulong_t)0,
+	},
+	{
+		/* Broxton */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0a80),
+		.driver_data = (kernel_ulong_t)0,
+	},
 	{ 0 },
 };
 

+ 8 - 3
drivers/hwtracing/intel_th/sth.c

@@ -94,10 +94,13 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master,
 	case STP_PACKET_TRIG:
 		if (flags & STP_PACKET_TIMESTAMPED)
 			reg += 4;
-		iowrite8(*payload, sth->base + reg);
+		writeb_relaxed(*payload, sth->base + reg);
 		break;
 
 	case STP_PACKET_MERR:
+		if (size > 4)
+			size = 4;
+
 		sth_iowrite(&out->MERR, payload, size);
 		break;
 
@@ -107,8 +110,8 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master,
 		else
 			outp = (u64 __iomem *)&out->FLAG;
 
-		size = 1;
-		sth_iowrite(outp, payload, size);
+		size = 0;
+		writeb_relaxed(0, outp);
 		break;
 
 	case STP_PACKET_USER:
@@ -129,6 +132,8 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master,
 
 		sth_iowrite(outp, payload, size);
 		break;
+	default:
+		return -ENOTSUPP;
 	}
 
 	return size;

+ 16 - 0
drivers/hwtracing/stm/Kconfig

@@ -1,6 +1,7 @@
 config STM
 	tristate "System Trace Module devices"
 	select CONFIGFS_FS
+	select SRCU
 	help
 	  A System Trace Module (STM) is a device exporting data in System
 	  Trace Protocol (STP) format as defined by MIPI STP standards.
@@ -8,6 +9,8 @@ config STM
 
 	  Say Y here to enable System Trace Module device support.
 
+if STM
+
 config STM_DUMMY
 	tristate "Dummy STM driver"
 	help
@@ -24,3 +27,16 @@ config STM_SOURCE_CONSOLE
 
 	  If you want to send kernel console messages over STM devices,
 	  say Y.
+
+config STM_SOURCE_HEARTBEAT
+	tristate "Heartbeat over STM devices"
+	help
+	  This is a kernel space trace source that sends periodic
+	  heartbeat messages to trace hosts over STM devices. It is
+	  also useful for testing stm class drivers and the stm class
+	  framework itself.
+
+	  If you want to send heartbeat messages over STM devices,
+	  say Y.
+
+endif

+ 2 - 0
drivers/hwtracing/stm/Makefile

@@ -5,5 +5,7 @@ stm_core-y		:= core.o policy.o
 obj-$(CONFIG_STM_DUMMY)	+= dummy_stm.o
 
 obj-$(CONFIG_STM_SOURCE_CONSOLE)	+= stm_console.o
+obj-$(CONFIG_STM_SOURCE_HEARTBEAT)	+= stm_heartbeat.o
 
 stm_console-y		:= console.o
+stm_heartbeat-y		:= heartbeat.o

+ 136 - 39
drivers/hwtracing/stm/core.c

@@ -113,6 +113,7 @@ struct stm_device *stm_find_device(const char *buf)
 
 	stm = to_stm_device(dev);
 	if (!try_module_get(stm->owner)) {
+		/* matches class_find_device() above */
 		put_device(dev);
 		return NULL;
 	}
@@ -125,7 +126,7 @@ struct stm_device *stm_find_device(const char *buf)
  * @stm:	stm device, previously acquired by stm_find_device()
  *
  * This drops the module reference and device reference taken by
- * stm_find_device().
+ * stm_find_device() or stm_char_open().
  */
 void stm_put_device(struct stm_device *stm)
 {
@@ -185,6 +186,9 @@ static void stm_output_claim(struct stm_device *stm, struct stm_output *output)
 {
 	struct stp_master *master = stm_master(stm, output->master);
 
+	lockdep_assert_held(&stm->mc_lock);
+	lockdep_assert_held(&output->lock);
+
 	if (WARN_ON_ONCE(master->nr_free < output->nr_chans))
 		return;
 
@@ -199,6 +203,9 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
 {
 	struct stp_master *master = stm_master(stm, output->master);
 
+	lockdep_assert_held(&stm->mc_lock);
+	lockdep_assert_held(&output->lock);
+
 	bitmap_release_region(&master->chan_map[0], output->channel,
 			      ilog2(output->nr_chans));
 
@@ -233,7 +240,7 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start,
 	return -1;
 }
 
-static unsigned int
+static int
 stm_find_master_chan(struct stm_device *stm, unsigned int width,
 		     unsigned int *mstart, unsigned int mend,
 		     unsigned int *cstart, unsigned int cend)
@@ -288,12 +295,13 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
 	}
 
 	spin_lock(&stm->mc_lock);
+	spin_lock(&output->lock);
 	/* output is already assigned -- shouldn't happen */
 	if (WARN_ON_ONCE(output->nr_chans))
 		goto unlock;
 
 	ret = stm_find_master_chan(stm, width, &midx, mend, &cidx, cend);
-	if (ret)
+	if (ret < 0)
 		goto unlock;
 
 	output->master = midx;
@@ -304,6 +312,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
 
 	ret = 0;
 unlock:
+	spin_unlock(&output->lock);
 	spin_unlock(&stm->mc_lock);
 
 	return ret;
@@ -312,11 +321,18 @@ unlock:
 static void stm_output_free(struct stm_device *stm, struct stm_output *output)
 {
 	spin_lock(&stm->mc_lock);
+	spin_lock(&output->lock);
 	if (output->nr_chans)
 		stm_output_disclaim(stm, output);
+	spin_unlock(&output->lock);
 	spin_unlock(&stm->mc_lock);
 }
 
+static void stm_output_init(struct stm_output *output)
+{
+	spin_lock_init(&output->lock);
+}
+
 static int major_match(struct device *dev, const void *data)
 {
 	unsigned int major = *(unsigned int *)data;
@@ -339,6 +355,7 @@ static int stm_char_open(struct inode *inode, struct file *file)
 	if (!stmf)
 		return -ENOMEM;
 
+	stm_output_init(&stmf->output);
 	stmf->stm = to_stm_device(dev);
 
 	if (!try_module_get(stmf->stm->owner))
@@ -349,6 +366,8 @@ static int stm_char_open(struct inode *inode, struct file *file)
 	return nonseekable_open(inode, file);
 
 err_free:
+	/* matches class_find_device() above */
+	put_device(dev);
 	kfree(stmf);
 
 	return err;
@@ -357,9 +376,19 @@ err_free:
 static int stm_char_release(struct inode *inode, struct file *file)
 {
 	struct stm_file *stmf = file->private_data;
+	struct stm_device *stm = stmf->stm;
+
+	if (stm->data->unlink)
+		stm->data->unlink(stm->data, stmf->output.master,
+				  stmf->output.channel);
 
-	stm_output_free(stmf->stm, &stmf->output);
-	stm_put_device(stmf->stm);
+	stm_output_free(stm, &stmf->output);
+
+	/*
+	 * matches the stm_char_open()'s
+	 * class_find_device() + try_module_get()
+	 */
+	stm_put_device(stm);
 	kfree(stmf);
 
 	return 0;
@@ -380,8 +409,8 @@ static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width)
 	return ret;
 }
 
-static void stm_write(struct stm_data *data, unsigned int master,
-		      unsigned int channel, const char *buf, size_t count)
+static ssize_t stm_write(struct stm_data *data, unsigned int master,
+			  unsigned int channel, const char *buf, size_t count)
 {
 	unsigned int flags = STP_PACKET_TIMESTAMPED;
 	const unsigned char *p = buf, nil = 0;
@@ -393,9 +422,14 @@ static void stm_write(struct stm_data *data, unsigned int master,
 		sz = data->packet(data, master, channel, STP_PACKET_DATA, flags,
 				  sz, p);
 		flags = 0;
+
+		if (sz < 0)
+			break;
 	}
 
 	data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil);
+
+	return pos;
 }
 
 static ssize_t stm_char_write(struct file *file, const char __user *buf,
@@ -406,6 +440,9 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
 	char *kbuf;
 	int err;
 
+	if (count + 1 > PAGE_SIZE)
+		count = PAGE_SIZE - 1;
+
 	/*
 	 * if no m/c have been assigned to this writer up to this
 	 * point, use "default" policy entry
@@ -430,8 +467,8 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
 		return -EFAULT;
 	}
 
-	stm_write(stm->data, stmf->output.master, stmf->output.channel, kbuf,
-		  count);
+	count = stm_write(stm->data, stmf->output.master, stmf->output.channel,
+			  kbuf, count);
 
 	kfree(kbuf);
 
@@ -515,10 +552,8 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
 		ret = stm->data->link(stm->data, stmf->output.master,
 				      stmf->output.channel);
 
-	if (ret) {
+	if (ret)
 		stm_output_free(stmf->stm, &stmf->output);
-		stm_put_device(stmf->stm);
-	}
 
 err_free:
 	kfree(id);
@@ -618,7 +653,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
 	if (!stm_data->packet || !stm_data->sw_nchannels)
 		return -EINVAL;
 
-	nmasters = stm_data->sw_end - stm_data->sw_start;
+	nmasters = stm_data->sw_end - stm_data->sw_start + 1;
 	stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL);
 	if (!stm)
 		return -ENOMEM;
@@ -641,6 +676,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
 	if (err)
 		goto err_device;
 
+	mutex_init(&stm->link_mutex);
 	spin_lock_init(&stm->link_lock);
 	INIT_LIST_HEAD(&stm->link_list);
 
@@ -654,6 +690,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
 	return 0;
 
 err_device:
+	/* matches device_initialize() above */
 	put_device(&stm->dev);
 err_free:
 	kfree(stm);
@@ -662,20 +699,28 @@ err_free:
 }
 EXPORT_SYMBOL_GPL(stm_register_device);
 
-static void __stm_source_link_drop(struct stm_source_device *src,
-				   struct stm_device *stm);
+static int __stm_source_link_drop(struct stm_source_device *src,
+				  struct stm_device *stm);
 
 void stm_unregister_device(struct stm_data *stm_data)
 {
 	struct stm_device *stm = stm_data->stm;
 	struct stm_source_device *src, *iter;
-	int i;
+	int i, ret;
 
-	spin_lock(&stm->link_lock);
+	mutex_lock(&stm->link_mutex);
 	list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
-		__stm_source_link_drop(src, stm);
+		ret = __stm_source_link_drop(src, stm);
+		/*
+		 * src <-> stm link must not change under the same
+		 * stm::link_mutex, so complain loudly if it has;
+		 * also in this situation ret!=0 means this src is
+		 * not connected to this stm and it should be otherwise
+		 * safe to proceed with the tear-down of stm.
+		 */
+		WARN_ON_ONCE(ret);
 	}
-	spin_unlock(&stm->link_lock);
+	mutex_unlock(&stm->link_mutex);
 
 	synchronize_srcu(&stm_source_srcu);
 
@@ -686,7 +731,7 @@ void stm_unregister_device(struct stm_data *stm_data)
 		stp_policy_unbind(stm->policy);
 	mutex_unlock(&stm->policy_mutex);
 
-	for (i = 0; i < stm->sw_nmasters; i++)
+	for (i = stm->data->sw_start; i <= stm->data->sw_end; i++)
 		stp_master_free(stm, i);
 
 	device_unregister(&stm->dev);
@@ -694,6 +739,17 @@ void stm_unregister_device(struct stm_data *stm_data)
 }
 EXPORT_SYMBOL_GPL(stm_unregister_device);
 
+/*
+ * stm::link_list access serialization uses a spinlock and a mutex; holding
+ * either of them guarantees that the list is stable; modification requires
+ * holding both of them.
+ *
+ * Lock ordering is as follows:
+ *   stm::link_mutex
+ *     stm::link_lock
+ *       src::link_lock
+ */
+
 /**
  * stm_source_link_add() - connect an stm_source device to an stm device
  * @src:	stm_source device
@@ -710,6 +766,7 @@ static int stm_source_link_add(struct stm_source_device *src,
 	char *id;
 	int err;
 
+	mutex_lock(&stm->link_mutex);
 	spin_lock(&stm->link_lock);
 	spin_lock(&src->link_lock);
 
@@ -719,6 +776,7 @@ static int stm_source_link_add(struct stm_source_device *src,
 
 	spin_unlock(&src->link_lock);
 	spin_unlock(&stm->link_lock);
+	mutex_unlock(&stm->link_mutex);
 
 	id = kstrdup(src->data->name, GFP_KERNEL);
 	if (id) {
@@ -753,9 +811,9 @@ static int stm_source_link_add(struct stm_source_device *src,
 
 fail_free_output:
 	stm_output_free(stm, &src->output);
-	stm_put_device(stm);
 
 fail_detach:
+	mutex_lock(&stm->link_mutex);
 	spin_lock(&stm->link_lock);
 	spin_lock(&src->link_lock);
 
@@ -764,6 +822,7 @@ fail_detach:
 
 	spin_unlock(&src->link_lock);
 	spin_unlock(&stm->link_lock);
+	mutex_unlock(&stm->link_mutex);
 
 	return err;
 }
@@ -776,28 +835,55 @@ fail_detach:
  * If @stm is @src::link, disconnect them from one another and put the
  * reference on the @stm device.
  *
- * Caller must hold stm::link_lock.
+ * Caller must hold stm::link_mutex.
  */
-static void __stm_source_link_drop(struct stm_source_device *src,
-				   struct stm_device *stm)
+static int __stm_source_link_drop(struct stm_source_device *src,
+				  struct stm_device *stm)
 {
 	struct stm_device *link;
+	int ret = 0;
+
+	lockdep_assert_held(&stm->link_mutex);
 
+	/* for stm::link_list modification, we hold both mutex and spinlock */
+	spin_lock(&stm->link_lock);
 	spin_lock(&src->link_lock);
 	link = srcu_dereference_check(src->link, &stm_source_srcu, 1);
-	if (WARN_ON_ONCE(link != stm)) {
-		spin_unlock(&src->link_lock);
-		return;
+
+	/*
+	 * The linked device may have changed since we last looked, because
+	 * we weren't holding the src::link_lock back then; if this is the
+	 * case, tell the caller to retry.
+	 */
+	if (link != stm) {
+		ret = -EAGAIN;
+		goto unlock;
 	}
 
 	stm_output_free(link, &src->output);
-	/* caller must hold stm::link_lock */
 	list_del_init(&src->link_entry);
 	/* matches stm_find_device() from stm_source_link_store() */
 	stm_put_device(link);
 	rcu_assign_pointer(src->link, NULL);
 
+unlock:
 	spin_unlock(&src->link_lock);
+	spin_unlock(&stm->link_lock);
+
+	/*
+	 * Call the unlink callbacks for both source and stm, when we know
+	 * that we have actually performed the unlinking.
+	 */
+	if (!ret) {
+		if (src->data->unlink)
+			src->data->unlink(src->data);
+
+		if (stm->data->unlink)
+			stm->data->unlink(stm->data, src->output.master,
+					  src->output.channel);
+	}
+
+	return ret;
 }
 
 /**
@@ -813,21 +899,29 @@ static void __stm_source_link_drop(struct stm_source_device *src,
 static void stm_source_link_drop(struct stm_source_device *src)
 {
 	struct stm_device *stm;
-	int idx;
+	int idx, ret;
 
+retry:
 	idx = srcu_read_lock(&stm_source_srcu);
+	/*
+	 * The stm device will be valid for the duration of this
+	 * read section, but the link may change before we grab
+	 * the src::link_lock in __stm_source_link_drop().
+	 */
 	stm = srcu_dereference(src->link, &stm_source_srcu);
 
+	ret = 0;
 	if (stm) {
-		if (src->data->unlink)
-			src->data->unlink(src->data);
-
-		spin_lock(&stm->link_lock);
-		__stm_source_link_drop(src, stm);
-		spin_unlock(&stm->link_lock);
+		mutex_lock(&stm->link_mutex);
+		ret = __stm_source_link_drop(src, stm);
+		mutex_unlock(&stm->link_mutex);
 	}
 
 	srcu_read_unlock(&stm_source_srcu, idx);
+
+	/* if it did change, retry */
+	if (ret == -EAGAIN)
+		goto retry;
 }
 
 static ssize_t stm_source_link_show(struct device *dev,
@@ -862,8 +956,10 @@ static ssize_t stm_source_link_store(struct device *dev,
 		return -EINVAL;
 
 	err = stm_source_link_add(src, link);
-	if (err)
+	if (err) {
+		/* matches the stm_find_device() above */
 		stm_put_device(link);
+	}
 
 	return err ? : count;
 }
@@ -925,6 +1021,7 @@ int stm_source_register_device(struct device *parent,
 	if (err)
 		goto err;
 
+	stm_output_init(&src->output);
 	spin_lock_init(&src->link_lock);
 	INIT_LIST_HEAD(&src->link_entry);
 	src->data = data;
@@ -973,9 +1070,9 @@ int stm_source_write(struct stm_source_data *data, unsigned int chan,
 
 	stm = srcu_dereference(src->link, &stm_source_srcu);
 	if (stm)
-		stm_write(stm->data, src->output.master,
-			  src->output.channel + chan,
-			  buf, count);
+		count = stm_write(stm->data, src->output.master,
+				  src->output.channel + chan,
+				  buf, count);
 	else
 		count = -ENODEV;
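Two themes run through the stm/core.c changes above: stm_write() now reports how much was actually written (and stops on a packet() error), and the src<->stm link is protected by link_mutex plus the two spinlocks, with __stm_source_link_drop() returning -EAGAIN when the link changed between the lockless SRCU lookup and the locked re-check. A stand-alone model of that snapshot/re-check/retry shape, using a plain mutex instead of SRCU (names are illustrative only):

#include <stdio.h>
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t link_mutex = PTHREAD_MUTEX_INITIALIZER;
static int stm_object;
static void *cur_link = &stm_object;	/* some non-NULL "stm" */

/* Drop the link only if it is still the one we sampled; otherwise
 * report -EAGAIN so the caller retries, as __stm_source_link_drop()
 * does when src->link changed under it.
 */
static int __link_drop(void *expected)
{
	int ret = 0;

	pthread_mutex_lock(&link_mutex);
	if (cur_link != expected)
		ret = -EAGAIN;
	else
		cur_link = NULL;
	pthread_mutex_unlock(&link_mutex);
	return ret;
}

static void link_drop(void)
{
	void *snap;
	int ret;

	do {
		snap = cur_link;	/* lockless snapshot, may go stale */
		ret = snap ? __link_drop(snap) : 0;
	} while (ret == -EAGAIN);
}

int main(void)
{
	link_drop();
	printf("link is %s\n", cur_link ? "still set" : "dropped");
	return 0;
}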
 

+ 62 - 9
drivers/hwtracing/stm/dummy_stm.c

@@ -40,22 +40,75 @@ dummy_stm_packet(struct stm_data *stm_data, unsigned int master,
 	return size;
 }
 
-static struct stm_data dummy_stm = {
-	.name		= "dummy_stm",
-	.sw_start	= 0x0000,
-	.sw_end		= 0xffff,
-	.sw_nchannels	= 0xffff,
-	.packet		= dummy_stm_packet,
-};
+#define DUMMY_STM_MAX 32
+
+static struct stm_data dummy_stm[DUMMY_STM_MAX];
+
+static int nr_dummies = 4;
+
+module_param(nr_dummies, int, 0600);
+
+static unsigned int dummy_stm_nr;
+
+static unsigned int fail_mode;
+
+module_param(fail_mode, int, 0600);
+
+static int dummy_stm_link(struct stm_data *data, unsigned int master,
+			  unsigned int channel)
+{
+	if (fail_mode && (channel & fail_mode))
+		return -EINVAL;
+
+	return 0;
+}
 
 static int dummy_stm_init(void)
 {
-	return stm_register_device(NULL, &dummy_stm, THIS_MODULE);
+	int i, ret = -ENOMEM, __nr_dummies = ACCESS_ONCE(nr_dummies);
+
+	if (__nr_dummies < 0 || __nr_dummies > DUMMY_STM_MAX)
+		return -EINVAL;
+
+	for (i = 0; i < __nr_dummies; i++) {
+		dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i);
+		if (!dummy_stm[i].name)
+			goto fail_unregister;
+
+		dummy_stm[i].sw_start		= 0x0000;
+		dummy_stm[i].sw_end		= 0xffff;
+		dummy_stm[i].sw_nchannels	= 0xffff;
+		dummy_stm[i].packet		= dummy_stm_packet;
+		dummy_stm[i].link		= dummy_stm_link;
+
+		ret = stm_register_device(NULL, &dummy_stm[i], THIS_MODULE);
+		if (ret)
+			goto fail_free;
+	}
+
+	dummy_stm_nr = __nr_dummies;
+
+	return 0;
+
+fail_unregister:
+	for (i--; i >= 0; i--) {
+		stm_unregister_device(&dummy_stm[i]);
+fail_free:
+		kfree(dummy_stm[i].name);
+	}
+
+	return ret;
+
 }
 
 static void dummy_stm_exit(void)
 {
-	stm_unregister_device(&dummy_stm);
+	int i;
+
+	for (i = 0; i < dummy_stm_nr; i++) {
+		stm_unregister_device(&dummy_stm[i]);
+		kfree(dummy_stm[i].name);
+	}
 }
 
 module_init(dummy_stm_init);
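Both dummy_stm and the new heartbeat driver below register an array of instances and unwind with an unusual but deliberate goto: a failure jumps into the middle of the cleanup loop, so the element that failed only has its name freed, while all earlier elements are fully unregistered and freed. The same shape in a stand-alone form (fake_register() and the names are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_MAX 4

static char *names[NR_MAX];

static int fake_register(int i)
{
	return i == 2 ? -1 : 0;		/* force a failure on the third one */
}

static int init_all(int nr)
{
	int i, ret = -1;

	for (i = 0; i < nr; i++) {
		names[i] = strdup("dummy");
		if (!names[i])
			goto fail_unregister;

		ret = fake_register(i);
		if (ret)
			goto fail_free;	/* free names[i], then unwind the rest */
	}
	return 0;

fail_unregister:
	for (i--; i >= 0; i--) {
		printf("unregister %d\n", i);
fail_free:
		free(names[i]);
	}
	return ret;
}

int main(void)
{
	return init_all(NR_MAX) ? 1 : 0;
}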

+ 130 - 0
drivers/hwtracing/stm/heartbeat.c

@@ -0,0 +1,130 @@
+/*
+ * Simple heartbeat STM source driver
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * Heartbeat STM source will send repetitive messages over STM devices to a
+ * trace host.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <linux/stm.h>
+
+#define STM_HEARTBEAT_MAX	32
+
+static int nr_devs = 4;
+static int interval_ms = 10;
+
+module_param(nr_devs, int, 0600);
+module_param(interval_ms, int, 0600);
+
+static struct stm_heartbeat {
+	struct stm_source_data	data;
+	struct hrtimer		hrtimer;
+	unsigned int		active;
+} stm_heartbeat[STM_HEARTBEAT_MAX];
+
+static unsigned int nr_instances;
+
+static const char str[] = "heartbeat stm source driver is here to serve you";
+
+static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr)
+{
+	struct stm_heartbeat *heartbeat = container_of(hr, struct stm_heartbeat,
+						       hrtimer);
+
+	stm_source_write(&heartbeat->data, 0, str, sizeof str);
+	if (heartbeat->active)
+		hrtimer_forward_now(hr, ms_to_ktime(interval_ms));
+
+	return heartbeat->active ? HRTIMER_RESTART : HRTIMER_NORESTART;
+}
+
+static int stm_heartbeat_link(struct stm_source_data *data)
+{
+	struct stm_heartbeat *heartbeat =
+		container_of(data, struct stm_heartbeat, data);
+
+	heartbeat->active = 1;
+	hrtimer_start(&heartbeat->hrtimer, ms_to_ktime(interval_ms),
+		      HRTIMER_MODE_ABS);
+
+	return 0;
+}
+
+static void stm_heartbeat_unlink(struct stm_source_data *data)
+{
+	struct stm_heartbeat *heartbeat =
+		container_of(data, struct stm_heartbeat, data);
+
+	heartbeat->active = 0;
+	hrtimer_cancel(&heartbeat->hrtimer);
+}
+
+static int stm_heartbeat_init(void)
+{
+	int i, ret = -ENOMEM, __nr_instances = ACCESS_ONCE(nr_devs);
+
+	if (__nr_instances < 0 || __nr_instances > STM_HEARTBEAT_MAX)
+		return -EINVAL;
+
+	for (i = 0; i < __nr_instances; i++) {
+		stm_heartbeat[i].data.name =
+			kasprintf(GFP_KERNEL, "heartbeat.%d", i);
+		if (!stm_heartbeat[i].data.name)
+			goto fail_unregister;
+
+		stm_heartbeat[i].data.nr_chans	= 1;
+		stm_heartbeat[i].data.link		= stm_heartbeat_link;
+		stm_heartbeat[i].data.unlink	= stm_heartbeat_unlink;
+		hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC,
+			     HRTIMER_MODE_ABS);
+		stm_heartbeat[i].hrtimer.function =
+			stm_heartbeat_hrtimer_handler;
+
+		ret = stm_source_register_device(NULL, &stm_heartbeat[i].data);
+		if (ret)
+			goto fail_free;
+	}
+
+	nr_instances = __nr_instances;
+
+	return 0;
+
+fail_unregister:
+	for (i--; i >= 0; i--) {
+		stm_source_unregister_device(&stm_heartbeat[i].data);
+fail_free:
+		kfree(stm_heartbeat[i].data.name);
+	}
+
+	return ret;
+}
+
+static void stm_heartbeat_exit(void)
+{
+	int i;
+
+	for (i = 0; i < nr_instances; i++) {
+		stm_source_unregister_device(&stm_heartbeat[i].data);
+		kfree(stm_heartbeat[i].data.name);
+	}
+}
+
+module_init(stm_heartbeat_init);
+module_exit(stm_heartbeat_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("stm_heartbeat driver");
+MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");

+ 19 - 6
drivers/hwtracing/stm/policy.c

@@ -272,13 +272,17 @@ void stp_policy_unbind(struct stp_policy *policy)
 {
 	struct stm_device *stm = policy->stm;
 
+	/*
+	 * stp_policy_release() will not call here if the policy is already
+	 * unbound; other users should not either, as no link exists between
+	 * this policy and anything else in that case
+	 */
 	if (WARN_ON_ONCE(!policy->stm))
 		return;
 
-	mutex_lock(&stm->policy_mutex);
-	stm->policy = NULL;
-	mutex_unlock(&stm->policy_mutex);
+	lockdep_assert_held(&stm->policy_mutex);
 
+	stm->policy = NULL;
 	policy->stm = NULL;
 
 	stm_put_device(stm);
@@ -287,8 +291,16 @@ void stp_policy_unbind(struct stp_policy *policy)
 static void stp_policy_release(struct config_item *item)
 {
 	struct stp_policy *policy = to_stp_policy(item);
+	struct stm_device *stm = policy->stm;
 
+	/* a policy *can* be unbound and still exist in configfs tree */
+	if (!stm)
+		return;
+
+	mutex_lock(&stm->policy_mutex);
 	stp_policy_unbind(policy);
+	mutex_unlock(&stm->policy_mutex);
+
 	kfree(policy);
 }
 
@@ -320,10 +332,11 @@ stp_policies_make(struct config_group *group, const char *name)
 
 	/*
 	 * node must look like <device_name>.<policy_name>, where
-	 * <device_name> is the name of an existing stm device and
-	 * <policy_name> is an arbitrary string
+	 * <device_name> is the name of an existing stm device; may
+	 *               contain dots;
+	 * <policy_name> is an arbitrary string; may not contain dots
 	 */
-	p = strchr(devname, '.');
+	p = strrchr(devname, '.');
 	if (!p) {
 		kfree(devname);
 		return ERR_PTR(-EINVAL);

+ 2 - 0
drivers/hwtracing/stm/stm.h

@@ -45,6 +45,7 @@ struct stm_device {
 	int			major;
 	unsigned int		sw_nmasters;
 	struct stm_data		*data;
+	struct mutex		link_mutex;
 	spinlock_t		link_lock;
 	struct list_head	link_list;
 	/* master allocation */
@@ -56,6 +57,7 @@ struct stm_device {
 	container_of((_d), struct stm_device, dev)
 
 struct stm_output {
+	spinlock_t		lock;
 	unsigned int		master;
 	unsigned int		channel;
 	unsigned int		nr_chans;

+ 2 - 2
drivers/misc/Kconfig

@@ -440,7 +440,7 @@ config ARM_CHARLCD
 	  still useful.
 
 config BMP085
-	bool
+	tristate
 	depends on SYSFS
 
 config BMP085_I2C
@@ -470,7 +470,7 @@ config BMP085_SPI
 config PCH_PHUB
 	tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
 	select GENERIC_NET_UTILS
-	depends on PCI && (X86_32 || COMPILE_TEST)
+	depends on PCI && (X86_32 || MIPS || COMPILE_TEST)
 	help
 	  This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of
 	  Intel Topcliff which is an IOH(Input/Output Hub) for x86 embedded

+ 2 - 2
drivers/misc/ad525x_dpot.c

@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
 			 */
 			value = swab16(value);
 
-			if (dpot->uid == DPOT_UID(AD5271_ID))
+			if (dpot->uid == DPOT_UID(AD5274_ID))
 				value = value >> 2;
 		return value;
 	default:
@@ -452,7 +452,7 @@ static ssize_t sysfs_set_reg(struct device *dev,
 	int err;
 
 	if (reg & DPOT_ADDR_OTP_EN) {
-		if (!strncmp(buf, "enabled", sizeof("enabled")))
+		if (sysfs_streq(buf, "enabled"))
 			set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
 		else
 			clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);

+ 4 - 4
drivers/misc/apds990x.c

@@ -1215,7 +1215,7 @@ static int apds990x_remove(struct i2c_client *client)
 #ifdef CONFIG_PM_SLEEP
 static int apds990x_suspend(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct apds990x_chip *chip = i2c_get_clientdata(client);
 
 	apds990x_chip_off(chip);
@@ -1224,7 +1224,7 @@ static int apds990x_suspend(struct device *dev)
 
 static int apds990x_resume(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct apds990x_chip *chip = i2c_get_clientdata(client);
 
 	/*
@@ -1240,7 +1240,7 @@ static int apds990x_resume(struct device *dev)
 #ifdef CONFIG_PM
 static int apds990x_runtime_suspend(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct apds990x_chip *chip = i2c_get_clientdata(client);
 
 	apds990x_chip_off(chip);
@@ -1249,7 +1249,7 @@ static int apds990x_runtime_suspend(struct device *dev)
 
 static int apds990x_runtime_resume(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct apds990x_chip *chip = i2c_get_clientdata(client);
 
 	apds990x_chip_on(chip);
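The apds990x and bh1770 hunks (like the kobj_to_dev() ones in c2port and cxl) replace open-coded container_of() with the helper that already exists for exactly this cast; both expand to the same pointer arithmetic. A stand-alone illustration of the pattern, with simplified stand-ins for the kernel structures:

#include <stdio.h>
#include <stddef.h>

struct device { const char *name; };

struct i2c_client {
	int addr;
	struct device dev;	/* embedded, as in the kernel */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define to_i2c_client(d) container_of(d, struct i2c_client, dev)

int main(void)
{
	struct i2c_client client = { 0x39, { "apds990x" } };
	struct device *dev = &client.dev;	/* what a PM callback receives */

	printf("addr=%#x\n", to_i2c_client(dev)->addr);
	return 0;
}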

+ 2 - 22
drivers/misc/arm-charlcd.c

@@ -8,7 +8,6 @@
  * Author: Linus Walleij <triad@df.lth.se>
  */
 #include <linux/init.h>
-#include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
@@ -328,20 +327,6 @@ out_no_resource:
 	return ret;
 }
 
-static int __exit charlcd_remove(struct platform_device *pdev)
-{
-	struct charlcd *lcd = platform_get_drvdata(pdev);
-
-	if (lcd) {
-		free_irq(lcd->irq, lcd);
-		iounmap(lcd->virtbase);
-		release_mem_region(lcd->phybase, lcd->physize);
-		kfree(lcd);
-	}
-
-	return 0;
-}
-
 static int charlcd_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -376,13 +361,8 @@ static struct platform_driver charlcd_driver = {
 	.driver = {
 		.name = DRIVERNAME,
 		.pm = &charlcd_pm_ops,
+		.suppress_bind_attrs = true,
 		.of_match_table = of_match_ptr(charlcd_match),
 	},
-	.remove = __exit_p(charlcd_remove),
 };
-
-module_platform_driver_probe(charlcd_driver, charlcd_probe);
-
-MODULE_AUTHOR("Linus Walleij <triad@df.lth.se>");
-MODULE_DESCRIPTION("ARM Character LCD Driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver_probe(charlcd_driver, charlcd_probe);

+ 4 - 4
drivers/misc/bh1770glc.c

@@ -1323,7 +1323,7 @@ static int bh1770_remove(struct i2c_client *client)
 #ifdef CONFIG_PM_SLEEP
 static int bh1770_suspend(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct bh1770_chip *chip = i2c_get_clientdata(client);
 
 	bh1770_chip_off(chip);
@@ -1333,7 +1333,7 @@ static int bh1770_suspend(struct device *dev)
 
 static int bh1770_resume(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct bh1770_chip *chip = i2c_get_clientdata(client);
 	int ret = 0;
 
@@ -1361,7 +1361,7 @@ static int bh1770_resume(struct device *dev)
 #ifdef CONFIG_PM
 static int bh1770_runtime_suspend(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct bh1770_chip *chip = i2c_get_clientdata(client);
 
 	bh1770_chip_off(chip);
@@ -1371,7 +1371,7 @@ static int bh1770_runtime_suspend(struct device *dev)
 
 static int bh1770_runtime_resume(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct bh1770_chip *chip = i2c_get_clientdata(client);
 
 	bh1770_chip_on(chip);

+ 2 - 6
drivers/misc/c2port/core.c

@@ -721,9 +721,7 @@ static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj,
 				struct bin_attribute *attr,
 				char *buffer, loff_t offset, size_t count)
 {
-	struct c2port_device *c2dev =
-			dev_get_drvdata(container_of(kobj,
-						struct device, kobj));
+	struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
 	ssize_t ret;
 
 	/* Check the device and flash access status */
@@ -838,9 +836,7 @@ static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
 				struct bin_attribute *attr,
 				char *buffer, loff_t offset, size_t count)
 {
-	struct c2port_device *c2dev =
-			dev_get_drvdata(container_of(kobj,
-						struct device, kobj));
+	struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
 	int ret;
 
 	/* Check the device access status */

+ 2 - 3
drivers/misc/cxl/sysfs.c

@@ -386,8 +386,7 @@ static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
 			       struct bin_attribute *bin_attr, char *buf,
 			       loff_t off, size_t count)
 {
-	struct cxl_afu *afu = to_cxl_afu(container_of(kobj,
-						      struct device, kobj));
+	struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));
 
 	return cxl_afu_read_err_buffer(afu, buf, off, count);
 }
@@ -467,7 +466,7 @@ static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
 			       loff_t off, size_t count)
 {
 	struct afu_config_record *cr = to_cr(kobj);
-	struct cxl_afu *afu = to_cxl_afu(container_of(kobj->parent, struct device, kobj));
+	struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));
 
 	u64 i, j, val;
 

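The c2port and cxl hunks above (and the eeprom and genwqe ones below) swap container_of(kobj, struct device, kobj) for the kobj_to_dev() helper from <linux/device.h>. The usual place this shows up is a sysfs bin_attribute callback, roughly as sketched here; example_data is a hypothetical per-device structure:

	#include <linux/device.h>
	#include <linux/kernel.h>
	#include <linux/string.h>
	#include <linux/sysfs.h>

	struct example_data { u8 cache[128]; };		/* hypothetical per-device state */

	static ssize_t example_bin_read(struct file *filp, struct kobject *kobj,
					struct bin_attribute *attr,
					char *buf, loff_t off, size_t count)
	{
		/* kobj_to_dev() hides the container_of(kobj, struct device, kobj) */
		struct example_data *data = dev_get_drvdata(kobj_to_dev(kobj));

		if (off >= sizeof(data->cache))
			return 0;
		count = min(count, sizeof(data->cache) - (size_t)off);
		memcpy(buf, data->cache + off, count);
		return count;
	}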
+ 6 - 0
drivers/misc/eeprom/Kconfig

@@ -3,6 +3,8 @@ menu "EEPROM support"
 config EEPROM_AT24
 	tristate "I2C EEPROMs / RAMs / ROMs from most vendors"
 	depends on I2C && SYSFS
+	select REGMAP
+	select NVMEM
 	help
 	  Enable this driver to get read/write support to most I2C EEPROMs
 	  and compatible devices like FRAMs, SRAMs, ROMs etc. After you
@@ -30,6 +32,8 @@ config EEPROM_AT24
 config EEPROM_AT25
 	tristate "SPI EEPROMs from most vendors"
 	depends on SPI && SYSFS
+	select REGMAP
+	select NVMEM
 	help
 	  Enable this driver to get read/write support to most SPI EEPROMs,
 	  after you configure the board init code to know about each eeprom
@@ -74,6 +78,8 @@ config EEPROM_93CX6
 config EEPROM_93XX46
 	tristate "Microwire EEPROM 93XX46 support"
 	depends on SPI && SYSFS
+	select REGMAP
+	select NVMEM
 	help
 	  Driver for the microwire EEPROM chipsets 93xx46x. The driver
 	  supports both read and write commands and also the command to

+ 70 - 60
drivers/misc/eeprom/at24.c

@@ -15,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/mutex.h>
-#include <linux/sysfs.h>
 #include <linux/mod_devicetable.h>
 #include <linux/log2.h>
 #include <linux/bitops.h>
@@ -23,6 +22,8 @@
 #include <linux/of.h>
 #include <linux/acpi.h>
 #include <linux/i2c.h>
+#include <linux/nvmem-provider.h>
+#include <linux/regmap.h>
 #include <linux/platform_data/at24.h>
 
 /*
@@ -55,7 +56,6 @@
 
 struct at24_data {
 	struct at24_platform_data chip;
-	struct memory_accessor macc;
 	int use_smbus;
 	int use_smbus_write;
 
@@ -64,12 +64,15 @@ struct at24_data {
 	 * but not from changes by other I2C masters.
 	 */
 	struct mutex lock;
-	struct bin_attribute bin;
 
 	u8 *writebuf;
 	unsigned write_max;
 	unsigned num_addresses;
 
+	struct regmap_config regmap_config;
+	struct nvmem_config nvmem_config;
+	struct nvmem_device *nvmem;
+
 	/*
 	 * Some chips tie up multiple I2C addresses; dummy devices reserve
 	 * them for us, and we'll use them with SMBus calls.
@@ -283,17 +286,6 @@ static ssize_t at24_read(struct at24_data *at24,
 	return retval;
 }
 
-static ssize_t at24_bin_read(struct file *filp, struct kobject *kobj,
-		struct bin_attribute *attr,
-		char *buf, loff_t off, size_t count)
-{
-	struct at24_data *at24;
-
-	at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
-	return at24_read(at24, buf, off, count);
-}
-
-
 /*
  * Note that if the hardware write-protect pin is pulled high, the whole
  * chip is normally write protected. But there are plenty of product
@@ -414,40 +406,49 @@ static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off,
 	return retval;
 }
 
-static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
-		struct bin_attribute *attr,
-		char *buf, loff_t off, size_t count)
-{
-	struct at24_data *at24;
-
-	at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
-	return at24_write(at24, buf, off, count);
-}
-
 /*-------------------------------------------------------------------------*/
 
 /*
- * This lets other kernel code access the eeprom data. For example, it
- * might hold a board's Ethernet address, or board-specific calibration
- * data generated on the manufacturing floor.
- */
-
-static ssize_t at24_macc_read(struct memory_accessor *macc, char *buf,
-			 off_t offset, size_t count)
+ * Provide a regmap interface, which is registered with the NVMEM
+ * framework
+*/
+static int at24_regmap_read(void *context, const void *reg, size_t reg_size,
+			    void *val, size_t val_size)
 {
-	struct at24_data *at24 = container_of(macc, struct at24_data, macc);
+	struct at24_data *at24 = context;
+	off_t offset = *(u32 *)reg;
+	int err;
 
-	return at24_read(at24, buf, offset, count);
+	err = at24_read(at24, val, offset, val_size);
+	if (err)
+		return err;
+	return 0;
 }
 
-static ssize_t at24_macc_write(struct memory_accessor *macc, const char *buf,
-			  off_t offset, size_t count)
+static int at24_regmap_write(void *context, const void *data, size_t count)
 {
-	struct at24_data *at24 = container_of(macc, struct at24_data, macc);
+	struct at24_data *at24 = context;
+	const char *buf;
+	u32 offset;
+	size_t len;
+	int err;
 
-	return at24_write(at24, buf, offset, count);
+	memcpy(&offset, data, sizeof(offset));
+	buf = (const char *)data + sizeof(offset);
+	len = count - sizeof(offset);
+
+	err = at24_write(at24, buf, offset, len);
+	if (err)
+		return err;
+	return 0;
 }
 
+static const struct regmap_bus at24_regmap_bus = {
+	.read = at24_regmap_read,
+	.write = at24_regmap_write,
+	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
 /*-------------------------------------------------------------------------*/
 
 #ifdef CONFIG_OF
@@ -481,6 +482,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
 	struct at24_data *at24;
 	int err;
 	unsigned i, num_addresses;
+	struct regmap *regmap;
 
 	if (client->dev.platform_data) {
 		chip = *(struct at24_platform_data *)client->dev.platform_data;
@@ -573,29 +575,12 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
 	at24->chip = chip;
 	at24->num_addresses = num_addresses;
 
-	/*
-	 * Export the EEPROM bytes through sysfs, since that's convenient.
-	 * By default, only root should see the data (maybe passwords etc)
-	 */
-	sysfs_bin_attr_init(&at24->bin);
-	at24->bin.attr.name = "eeprom";
-	at24->bin.attr.mode = chip.flags & AT24_FLAG_IRUGO ? S_IRUGO : S_IRUSR;
-	at24->bin.read = at24_bin_read;
-	at24->bin.size = chip.byte_len;
-
-	at24->macc.read = at24_macc_read;
-
 	writable = !(chip.flags & AT24_FLAG_READONLY);
 	if (writable) {
 		if (!use_smbus || use_smbus_write) {
 
 			unsigned write_max = chip.page_size;
 
-			at24->macc.write = at24_macc_write;
-
-			at24->bin.write = at24_bin_write;
-			at24->bin.attr.mode |= S_IWUSR;
-
 			if (write_max > io_limit)
 				write_max = io_limit;
 			if (use_smbus && write_max > I2C_SMBUS_BLOCK_MAX)
@@ -627,14 +612,38 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
 		}
 	}
 
-	err = sysfs_create_bin_file(&client->dev.kobj, &at24->bin);
-	if (err)
+	at24->regmap_config.reg_bits = 32;
+	at24->regmap_config.val_bits = 8;
+	at24->regmap_config.reg_stride = 1;
+	at24->regmap_config.max_register = chip.byte_len - 1;
+
+	regmap = devm_regmap_init(&client->dev, &at24_regmap_bus, at24,
+				  &at24->regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(&client->dev, "regmap init failed\n");
+		err = PTR_ERR(regmap);
+		goto err_clients;
+	}
+
+	at24->nvmem_config.name = dev_name(&client->dev);
+	at24->nvmem_config.dev = &client->dev;
+	at24->nvmem_config.read_only = !writable;
+	at24->nvmem_config.root_only = true;
+	at24->nvmem_config.owner = THIS_MODULE;
+	at24->nvmem_config.compat = true;
+	at24->nvmem_config.base_dev = &client->dev;
+
+	at24->nvmem = nvmem_register(&at24->nvmem_config);
+
+	if (IS_ERR(at24->nvmem)) {
+		err = PTR_ERR(at24->nvmem);
 		goto err_clients;
+	}
 
 	i2c_set_clientdata(client, at24);
 
-	dev_info(&client->dev, "%zu byte %s EEPROM, %s, %u bytes/write\n",
-		at24->bin.size, client->name,
+	dev_info(&client->dev, "%u byte %s EEPROM, %s, %u bytes/write\n",
+		chip.byte_len, client->name,
 		writable ? "writable" : "read-only", at24->write_max);
 	if (use_smbus == I2C_SMBUS_WORD_DATA ||
 	    use_smbus == I2C_SMBUS_BYTE_DATA) {
@@ -645,7 +654,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
 	/* export data to kernel code */
 	if (chip.setup)
-		chip.setup(&at24->macc, chip.context);
+		chip.setup(at24->nvmem, chip.context);
 
 	return 0;
 
@@ -663,7 +672,8 @@ static int at24_remove(struct i2c_client *client)
 	int i;
 
 	at24 = i2c_get_clientdata(client);
-	sysfs_remove_bin_file(&client->dev.kobj, &at24->bin);
+
+	nvmem_unregister(at24->nvmem);
 
 	for (i = 1; i < at24->num_addresses; i++)
 		i2c_unregister_device(at24->client[i]);

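A note on the regmap bridge used in this at24 conversion (and repeated almost verbatim in at25 and eeprom_93xx46 below): the driver registers a custom regmap_bus with .reg_bits = 32 and .val_bits = 8, so the NVMEM compat layer can treat EEPROM byte offsets as regmap "registers". In the bus .write callback regmap hands over a single buffer whose first four bytes are the register (the byte offset, laid out natively because of REGMAP_ENDIAN_NATIVE) followed by the value bytes, which is why these drivers memcpy() the offset out and then step past it. A reduced sketch of that unpacking, with example_eeprom and example_eeprom_write() as hypothetical names:

	static int example_regmap_write(void *context, const void *data, size_t count)
	{
		struct example_eeprom *ee = context;	/* hypothetical driver state */
		u32 offset;

		/* first sizeof(u32) bytes: the register/offset, then the payload */
		memcpy(&offset, data, sizeof(offset));

		return example_eeprom_write(ee, (const char *)data + sizeof(offset),
					    offset, count - sizeof(offset));
	}

The .read callback is simpler because regmap passes the register buffer and the value buffer separately, as the hunks above show.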
+ 68 - 80
drivers/misc/eeprom/at25.c

@@ -16,6 +16,8 @@
 #include <linux/device.h>
 #include <linux/sched.h>
 
+#include <linux/nvmem-provider.h>
+#include <linux/regmap.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/eeprom.h>
 #include <linux/property.h>
@@ -29,11 +31,12 @@
 
 struct at25_data {
 	struct spi_device	*spi;
-	struct memory_accessor	mem;
 	struct mutex		lock;
 	struct spi_eeprom	chip;
-	struct bin_attribute	bin;
 	unsigned		addrlen;
+	struct regmap_config	regmap_config;
+	struct nvmem_config	nvmem_config;
+	struct nvmem_device	*nvmem;
 };
 
 #define	AT25_WREN	0x06		/* latch the write enable */
@@ -77,10 +80,10 @@ at25_ee_read(
 	struct spi_message	m;
 	u8			instr;
 
-	if (unlikely(offset >= at25->bin.size))
+	if (unlikely(offset >= at25->chip.byte_len))
 		return 0;
-	if ((offset + count) > at25->bin.size)
-		count = at25->bin.size - offset;
+	if ((offset + count) > at25->chip.byte_len)
+		count = at25->chip.byte_len - offset;
 	if (unlikely(!count))
 		return count;
 
@@ -131,21 +134,19 @@ at25_ee_read(
 	return status ? status : count;
 }
 
-static ssize_t
-at25_bin_read(struct file *filp, struct kobject *kobj,
-	      struct bin_attribute *bin_attr,
-	      char *buf, loff_t off, size_t count)
+static int at25_regmap_read(void *context, const void *reg, size_t reg_size,
+			    void *val, size_t val_size)
 {
-	struct device		*dev;
-	struct at25_data	*at25;
+	struct at25_data *at25 = context;
+	off_t offset = *(u32 *)reg;
+	int err;
 
-	dev = container_of(kobj, struct device, kobj);
-	at25 = dev_get_drvdata(dev);
-
-	return at25_ee_read(at25, buf, off, count);
+	err = at25_ee_read(at25, val, offset, val_size);
+	if (err)
+		return err;
+	return 0;
 }
 
-
 static ssize_t
 at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
 	      size_t count)
@@ -155,10 +156,10 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
 	unsigned		buf_size;
 	u8			*bounce;
 
-	if (unlikely(off >= at25->bin.size))
+	if (unlikely(off >= at25->chip.byte_len))
 		return -EFBIG;
-	if ((off + count) > at25->bin.size)
-		count = at25->bin.size - off;
+	if ((off + count) > at25->chip.byte_len)
+		count = at25->chip.byte_len - off;
 	if (unlikely(!count))
 		return count;
 
@@ -265,39 +266,29 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
 	return written ? written : status;
 }
 
-static ssize_t
-at25_bin_write(struct file *filp, struct kobject *kobj,
-	       struct bin_attribute *bin_attr,
-	       char *buf, loff_t off, size_t count)
+static int at25_regmap_write(void *context, const void *data, size_t count)
 {
-	struct device		*dev;
-	struct at25_data	*at25;
-
-	dev = container_of(kobj, struct device, kobj);
-	at25 = dev_get_drvdata(dev);
-
-	return at25_ee_write(at25, buf, off, count);
-}
+	struct at25_data *at25 = context;
+	const char *buf;
+	u32 offset;
+	size_t len;
+	int err;
 
-/*-------------------------------------------------------------------------*/
-
-/* Let in-kernel code access the eeprom data. */
-
-static ssize_t at25_mem_read(struct memory_accessor *mem, char *buf,
-			 off_t offset, size_t count)
-{
-	struct at25_data *at25 = container_of(mem, struct at25_data, mem);
+	memcpy(&offset, data, sizeof(offset));
+	buf = (const char *)data + sizeof(offset);
+	len = count - sizeof(offset);
 
-	return at25_ee_read(at25, buf, offset, count);
+	err = at25_ee_write(at25, buf, offset, len);
+	if (err)
+		return err;
+	return 0;
 }
 
-static ssize_t at25_mem_write(struct memory_accessor *mem, const char *buf,
-			  off_t offset, size_t count)
-{
-	struct at25_data *at25 = container_of(mem, struct at25_data, mem);
-
-	return at25_ee_write(at25, buf, offset, count);
-}
+static const struct regmap_bus at25_regmap_bus = {
+	.read = at25_regmap_read,
+	.write = at25_regmap_write,
+	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
 
 /*-------------------------------------------------------------------------*/
 
@@ -358,6 +349,7 @@ static int at25_probe(struct spi_device *spi)
 {
 	struct at25_data	*at25 = NULL;
 	struct spi_eeprom	chip;
+	struct regmap		*regmap;
 	int			err;
 	int			sr;
 	int			addrlen;
@@ -402,40 +394,35 @@ static int at25_probe(struct spi_device *spi)
 	spi_set_drvdata(spi, at25);
 	at25->addrlen = addrlen;
 
-	/* Export the EEPROM bytes through sysfs, since that's convenient.
-	 * And maybe to other kernel code; it might hold a board's Ethernet
-	 * address, or board-specific calibration data generated on the
-	 * manufacturing floor.
-	 *
-	 * Default to root-only access to the data; EEPROMs often hold data
-	 * that's sensitive for read and/or write, like ethernet addresses,
-	 * security codes, board-specific manufacturing calibrations, etc.
-	 */
-	sysfs_bin_attr_init(&at25->bin);
-	at25->bin.attr.name = "eeprom";
-	at25->bin.attr.mode = S_IRUSR;
-	at25->bin.read = at25_bin_read;
-	at25->mem.read = at25_mem_read;
-
-	at25->bin.size = at25->chip.byte_len;
-	if (!(chip.flags & EE_READONLY)) {
-		at25->bin.write = at25_bin_write;
-		at25->bin.attr.mode |= S_IWUSR;
-		at25->mem.write = at25_mem_write;
-	}
+	at25->regmap_config.reg_bits = 32;
+	at25->regmap_config.val_bits = 8;
+	at25->regmap_config.reg_stride = 1;
+	at25->regmap_config.max_register = chip.byte_len - 1;
 
-	err = sysfs_create_bin_file(&spi->dev.kobj, &at25->bin);
-	if (err)
-		return err;
-
-	if (chip.setup)
-		chip.setup(&at25->mem, chip.context);
+	regmap = devm_regmap_init(&spi->dev, &at25_regmap_bus, at25,
+				  &at25->regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(&spi->dev, "regmap init failed\n");
+		return PTR_ERR(regmap);
+	}
 
-	dev_info(&spi->dev, "%Zd %s %s eeprom%s, pagesize %u\n",
-		(at25->bin.size < 1024)
-			? at25->bin.size
-			: (at25->bin.size / 1024),
-		(at25->bin.size < 1024) ? "Byte" : "KByte",
+	at25->nvmem_config.name = dev_name(&spi->dev);
+	at25->nvmem_config.dev = &spi->dev;
+	at25->nvmem_config.read_only = chip.flags & EE_READONLY;
+	at25->nvmem_config.root_only = true;
+	at25->nvmem_config.owner = THIS_MODULE;
+	at25->nvmem_config.compat = true;
+	at25->nvmem_config.base_dev = &spi->dev;
+
+	at25->nvmem = nvmem_register(&at25->nvmem_config);
+	if (IS_ERR(at25->nvmem))
+		return PTR_ERR(at25->nvmem);
+
+	dev_info(&spi->dev, "%d %s %s eeprom%s, pagesize %u\n",
+		(chip.byte_len < 1024)
+			? chip.byte_len
+			: (chip.byte_len / 1024),
+		(chip.byte_len < 1024) ? "Byte" : "KByte",
 		at25->chip.name,
 		(chip.flags & EE_READONLY) ? " (readonly)" : "",
 		at25->chip.page_size);
@@ -447,7 +434,8 @@ static int at25_remove(struct spi_device *spi)
 	struct at25_data	*at25;
 
 	at25 = spi_get_drvdata(spi);
-	sysfs_remove_bin_file(&spi->dev.kobj, &at25->bin);
+	nvmem_unregister(at25->nvmem);
+
 	return 0;
 }
 

+ 1 - 1
drivers/misc/eeprom/eeprom.c

@@ -84,7 +84,7 @@ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
 			   struct bin_attribute *bin_attr,
 			   char *buf, loff_t off, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj));
+	struct i2c_client *client = to_i2c_client(kobj_to_dev(kobj));
 	struct eeprom_data *data = i2c_get_clientdata(client);
 	u8 slice;
 

+ 269 - 63
drivers/misc/eeprom/eeprom_93xx46.c

@@ -10,12 +10,17 @@
 
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/gpio/consumer.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
 #include <linux/slab.h>
 #include <linux/spi/spi.h>
-#include <linux/sysfs.h>
+#include <linux/nvmem-provider.h>
+#include <linux/regmap.h>
 #include <linux/eeprom_93xx46.h>
 
 #define OP_START	0x4
@@ -25,73 +30,111 @@
 #define ADDR_ERAL	0x20
 #define ADDR_EWEN	0x30
 
+struct eeprom_93xx46_devtype_data {
+	unsigned int quirks;
+};
+
+static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = {
+	.quirks = EEPROM_93XX46_QUIRK_SINGLE_WORD_READ |
+		  EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
+};
+
 struct eeprom_93xx46_dev {
 	struct spi_device *spi;
 	struct eeprom_93xx46_platform_data *pdata;
-	struct bin_attribute bin;
 	struct mutex lock;
+	struct regmap_config regmap_config;
+	struct nvmem_config nvmem_config;
+	struct nvmem_device *nvmem;
 	int addrlen;
+	int size;
 };
 
+static inline bool has_quirk_single_word_read(struct eeprom_93xx46_dev *edev)
+{
+	return edev->pdata->quirks & EEPROM_93XX46_QUIRK_SINGLE_WORD_READ;
+}
+
+static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev)
+{
+	return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH;
+}
+
 static ssize_t
-eeprom_93xx46_bin_read(struct file *filp, struct kobject *kobj,
-		       struct bin_attribute *bin_attr,
-		       char *buf, loff_t off, size_t count)
+eeprom_93xx46_read(struct eeprom_93xx46_dev *edev, char *buf,
+		   unsigned off, size_t count)
 {
-	struct eeprom_93xx46_dev *edev;
-	struct device *dev;
-	struct spi_message m;
-	struct spi_transfer t[2];
-	int bits, ret;
-	u16 cmd_addr;
+	ssize_t ret = 0;
 
-	dev = container_of(kobj, struct device, kobj);
-	edev = dev_get_drvdata(dev);
+	if (unlikely(off >= edev->size))
+		return 0;
+	if ((off + count) > edev->size)
+		count = edev->size - off;
+	if (unlikely(!count))
+		return count;
 
-	cmd_addr = OP_READ << edev->addrlen;
+	mutex_lock(&edev->lock);
 
-	if (edev->addrlen == 7) {
-		cmd_addr |= off & 0x7f;
-		bits = 10;
-	} else {
-		cmd_addr |= off & 0x3f;
-		bits = 9;
-	}
+	if (edev->pdata->prepare)
+		edev->pdata->prepare(edev);
 
-	dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
-		cmd_addr, edev->spi->max_speed_hz);
+	while (count) {
+		struct spi_message m;
+		struct spi_transfer t[2] = { { 0 } };
+		u16 cmd_addr = OP_READ << edev->addrlen;
+		size_t nbytes = count;
+		int bits;
+		int err;
+
+		if (edev->addrlen == 7) {
+			cmd_addr |= off & 0x7f;
+			bits = 10;
+			if (has_quirk_single_word_read(edev))
+				nbytes = 1;
+		} else {
+			cmd_addr |= (off >> 1) & 0x3f;
+			bits = 9;
+			if (has_quirk_single_word_read(edev))
+				nbytes = 2;
+		}
 
-	spi_message_init(&m);
-	memset(t, 0, sizeof(t));
+		dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
+			cmd_addr, edev->spi->max_speed_hz);
 
-	t[0].tx_buf = (char *)&cmd_addr;
-	t[0].len = 2;
-	t[0].bits_per_word = bits;
-	spi_message_add_tail(&t[0], &m);
+		spi_message_init(&m);
 
-	t[1].rx_buf = buf;
-	t[1].len = count;
-	t[1].bits_per_word = 8;
-	spi_message_add_tail(&t[1], &m);
+		t[0].tx_buf = (char *)&cmd_addr;
+		t[0].len = 2;
+		t[0].bits_per_word = bits;
+		spi_message_add_tail(&t[0], &m);
 
-	mutex_lock(&edev->lock);
+		t[1].rx_buf = buf;
+		t[1].len = count;
+		t[1].bits_per_word = 8;
+		spi_message_add_tail(&t[1], &m);
 
-	if (edev->pdata->prepare)
-		edev->pdata->prepare(edev);
+		err = spi_sync(edev->spi, &m);
+		/* have to wait at least Tcsl ns */
+		ndelay(250);
 
-	ret = spi_sync(edev->spi, &m);
-	/* have to wait at least Tcsl ns */
-	ndelay(250);
-	if (ret) {
-		dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n",
-			count, (int)off, ret);
+		if (err) {
+			dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n",
+				nbytes, (int)off, err);
+			ret = err;
+			break;
+		}
+
+		buf += nbytes;
+		off += nbytes;
+		count -= nbytes;
+		ret += nbytes;
 	}
 
 	if (edev->pdata->finish)
 		edev->pdata->finish(edev);
 
 	mutex_unlock(&edev->lock);
-	return ret ? : count;
+	return ret;
 }
 
 static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
@@ -110,7 +153,13 @@ static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
 		bits = 9;
 	}
 
-	dev_dbg(&edev->spi->dev, "ew cmd 0x%04x\n", cmd_addr);
+	if (has_quirk_instruction_length(edev)) {
+		cmd_addr <<= 2;
+		bits += 2;
+	}
+
+	dev_dbg(&edev->spi->dev, "ew%s cmd 0x%04x, %d bits\n",
+			is_on ? "en" : "ds", cmd_addr, bits);
 
 	spi_message_init(&m);
 	memset(&t, 0, sizeof(t));
@@ -155,7 +204,7 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev,
 		bits = 10;
 		data_len = 1;
 	} else {
-		cmd_addr |= off & 0x3f;
+		cmd_addr |= (off >> 1) & 0x3f;
 		bits = 9;
 		data_len = 2;
 	}
@@ -182,16 +231,17 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev,
 }
 
 static ssize_t
-eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj,
-			struct bin_attribute *bin_attr,
-			char *buf, loff_t off, size_t count)
+eeprom_93xx46_write(struct eeprom_93xx46_dev *edev, const char *buf,
+		    loff_t off, size_t count)
 {
-	struct eeprom_93xx46_dev *edev;
-	struct device *dev;
 	int i, ret, step = 1;
 
-	dev = container_of(kobj, struct device, kobj);
-	edev = dev_get_drvdata(dev);
+	if (unlikely(off >= edev->size))
+		return -EFBIG;
+	if ((off + count) > edev->size)
+		count = edev->size - off;
+	if (unlikely(!count))
+		return count;
 
 	/* only write even number of bytes on 16-bit devices */
 	if (edev->addrlen == 6) {
@@ -228,6 +278,49 @@ eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj,
 	return ret ? : count;
 }
 
+/*
+ * Provide a regmap interface, which is registered with the NVMEM
+ * framework
+*/
+static int eeprom_93xx46_regmap_read(void *context, const void *reg,
+				     size_t reg_size, void *val,
+				     size_t val_size)
+{
+	struct eeprom_93xx46_dev *eeprom_93xx46 = context;
+	off_t offset = *(u32 *)reg;
+	int err;
+
+	err = eeprom_93xx46_read(eeprom_93xx46, val, offset, val_size);
+	if (err)
+		return err;
+	return 0;
+}
+
+static int eeprom_93xx46_regmap_write(void *context, const void *data,
+				      size_t count)
+{
+	struct eeprom_93xx46_dev *eeprom_93xx46 = context;
+	const char *buf;
+	u32 offset;
+	size_t len;
+	int err;
+
+	memcpy(&offset, data, sizeof(offset));
+	buf = (const char *)data + sizeof(offset);
+	len = count - sizeof(offset);
+
+	err = eeprom_93xx46_write(eeprom_93xx46, buf, offset, len);
+	if (err)
+		return err;
+	return 0;
+}
+
+static const struct regmap_bus eeprom_93xx46_regmap_bus = {
+	.read = eeprom_93xx46_regmap_read,
+	.write = eeprom_93xx46_regmap_write,
+	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
 static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev)
 {
 	struct eeprom_93xx46_platform_data *pd = edev->pdata;
@@ -245,6 +338,13 @@ static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev)
 		bits = 9;
 	}
 
+	if (has_quirk_instruction_length(edev)) {
+		cmd_addr <<= 2;
+		bits += 2;
+	}
+
+	dev_dbg(&edev->spi->dev, "eral cmd 0x%04x, %d bits\n", cmd_addr, bits);
+
 	spi_message_init(&m);
 	memset(&t, 0, sizeof(t));
 
@@ -294,12 +394,101 @@ static ssize_t eeprom_93xx46_store_erase(struct device *dev,
 }
 static DEVICE_ATTR(erase, S_IWUSR, NULL, eeprom_93xx46_store_erase);
 
+static void select_assert(void *context)
+{
+	struct eeprom_93xx46_dev *edev = context;
+
+	gpiod_set_value_cansleep(edev->pdata->select, 1);
+}
+
+static void select_deassert(void *context)
+{
+	struct eeprom_93xx46_dev *edev = context;
+
+	gpiod_set_value_cansleep(edev->pdata->select, 0);
+}
+
+static const struct of_device_id eeprom_93xx46_of_table[] = {
+	{ .compatible = "eeprom-93xx46", },
+	{ .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, },
+	{}
+};
+MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
+
+static int eeprom_93xx46_probe_dt(struct spi_device *spi)
+{
+	const struct of_device_id *of_id =
+		of_match_device(eeprom_93xx46_of_table, &spi->dev);
+	struct device_node *np = spi->dev.of_node;
+	struct eeprom_93xx46_platform_data *pd;
+	u32 tmp;
+	int gpio;
+	enum of_gpio_flags of_flags;
+	int ret;
+
+	pd = devm_kzalloc(&spi->dev, sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return -ENOMEM;
+
+	ret = of_property_read_u32(np, "data-size", &tmp);
+	if (ret < 0) {
+		dev_err(&spi->dev, "data-size property not found\n");
+		return ret;
+	}
+
+	if (tmp == 8) {
+		pd->flags |= EE_ADDR8;
+	} else if (tmp == 16) {
+		pd->flags |= EE_ADDR16;
+	} else {
+		dev_err(&spi->dev, "invalid data-size (%d)\n", tmp);
+		return -EINVAL;
+	}
+
+	if (of_property_read_bool(np, "read-only"))
+		pd->flags |= EE_READONLY;
+
+	gpio = of_get_named_gpio_flags(np, "select-gpios", 0, &of_flags);
+	if (gpio_is_valid(gpio)) {
+		unsigned long flags =
+			of_flags == OF_GPIO_ACTIVE_LOW ? GPIOF_ACTIVE_LOW : 0;
+
+		ret = devm_gpio_request_one(&spi->dev, gpio, flags,
+					    "eeprom_93xx46_select");
+		if (ret)
+			return ret;
+
+		pd->select = gpio_to_desc(gpio);
+		pd->prepare = select_assert;
+		pd->finish = select_deassert;
+
+		gpiod_direction_output(pd->select, 0);
+	}
+
+	if (of_id->data) {
+		const struct eeprom_93xx46_devtype_data *data = of_id->data;
+
+		pd->quirks = data->quirks;
+	}
+
+	spi->dev.platform_data = pd;
+
+	return 0;
+}
+
 static int eeprom_93xx46_probe(struct spi_device *spi)
 {
 	struct eeprom_93xx46_platform_data *pd;
 	struct eeprom_93xx46_dev *edev;
+	struct regmap *regmap;
 	int err;
 
+	if (spi->dev.of_node) {
+		err = eeprom_93xx46_probe_dt(spi);
+		if (err < 0)
+			return err;
+	}
+
 	pd = spi->dev.platform_data;
 	if (!pd) {
 		dev_err(&spi->dev, "missing platform data\n");
@@ -325,19 +514,34 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
 	edev->spi = spi_dev_get(spi);
 	edev->pdata = pd;
 
-	sysfs_bin_attr_init(&edev->bin);
-	edev->bin.attr.name = "eeprom";
-	edev->bin.attr.mode = S_IRUSR;
-	edev->bin.read = eeprom_93xx46_bin_read;
-	edev->bin.size = 128;
-	if (!(pd->flags & EE_READONLY)) {
-		edev->bin.write = eeprom_93xx46_bin_write;
-		edev->bin.attr.mode |= S_IWUSR;
+	edev->size = 128;
+
+	edev->regmap_config.reg_bits = 32;
+	edev->regmap_config.val_bits = 8;
+	edev->regmap_config.reg_stride = 1;
+	edev->regmap_config.max_register = edev->size - 1;
+
+	regmap = devm_regmap_init(&spi->dev, &eeprom_93xx46_regmap_bus, edev,
+				  &edev->regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(&spi->dev, "regmap init failed\n");
+		err = PTR_ERR(regmap);
+		goto fail;
 	}
 
-	err = sysfs_create_bin_file(&spi->dev.kobj, &edev->bin);
-	if (err)
+	edev->nvmem_config.name = dev_name(&spi->dev);
+	edev->nvmem_config.dev = &spi->dev;
+	edev->nvmem_config.read_only = pd->flags & EE_READONLY;
+	edev->nvmem_config.root_only = true;
+	edev->nvmem_config.owner = THIS_MODULE;
+	edev->nvmem_config.compat = true;
+	edev->nvmem_config.base_dev = &spi->dev;
+
+	edev->nvmem = nvmem_register(&edev->nvmem_config);
+	if (IS_ERR(edev->nvmem)) {
+		err = PTR_ERR(edev->nvmem);
 		goto fail;
+	}
 
 	dev_info(&spi->dev, "%d-bit eeprom %s\n",
 		(pd->flags & EE_ADDR8) ? 8 : 16,
@@ -359,10 +563,11 @@ static int eeprom_93xx46_remove(struct spi_device *spi)
 {
 	struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi);
 
+	nvmem_unregister(edev->nvmem);
+
 	if (!(edev->pdata->flags & EE_READONLY))
 		device_remove_file(&spi->dev, &dev_attr_erase);
 
-	sysfs_remove_bin_file(&spi->dev.kobj, &edev->bin);
 	kfree(edev);
 	return 0;
 }
@@ -370,6 +575,7 @@ static int eeprom_93xx46_remove(struct spi_device *spi)
 static struct spi_driver eeprom_93xx46_driver = {
 	.driver = {
 		.name	= "93xx46",
+		.of_match_table = of_match_ptr(eeprom_93xx46_of_table),
 	},
 	.probe		= eeprom_93xx46_probe,
 	.remove		= eeprom_93xx46_remove,

+ 1 - 1
drivers/misc/genwqe/card_sysfs.c

@@ -278,7 +278,7 @@ static umode_t genwqe_is_visible(struct kobject *kobj,
 				 struct attribute *attr, int n)
 {
 	unsigned int j;
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct genwqe_dev *cd = dev_get_drvdata(dev);
 	umode_t mode = attr->mode;
 

+ 6 - 3
drivers/misc/ibmasm/ibmasm.h

@@ -34,6 +34,7 @@
 #include <linux/kref.h>
 #include <linux/device.h>
 #include <linux/input.h>
+#include <linux/time64.h>
 
 /* Driver identification */
 #define DRIVER_NAME	"ibmasm"
@@ -53,9 +54,11 @@ extern int ibmasm_debug;
 
 static inline char *get_timestamp(char *buf)
 {
-	struct timeval now;
-	do_gettimeofday(&now);
-	sprintf(buf, "%lu.%lu", now.tv_sec, now.tv_usec);
+	struct timespec64 now;
+
+	ktime_get_real_ts64(&now);
+	sprintf(buf, "%llu.%.08lu", (long long)now.tv_sec,
+				now.tv_nsec / NSEC_PER_USEC);
 	return buf;
 }
 

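The ibmasm change above is a standard y2038 cleanup: struct timeval and do_gettimeofday() are replaced by struct timespec64 and ktime_get_real_ts64(), with the nanoseconds field scaled down by NSEC_PER_USEC where a microsecond-resolution string is wanted. A minimal sketch of the idiom; example_stamp() is a placeholder name:

	#include <linux/kernel.h>
	#include <linux/time64.h>
	#include <linux/timekeeping.h>

	static void example_stamp(char *buf)
	{
		struct timespec64 now;

		ktime_get_real_ts64(&now);	/* 64-bit wall-clock time, y2038 safe */
		sprintf(buf, "%lld.%06lu", (long long)now.tv_sec,
			now.tv_nsec / NSEC_PER_USEC);
	}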
+ 4 - 4
drivers/misc/lis3lv02d/lis3lv02d_i2c.c

@@ -209,7 +209,7 @@ static int lis3lv02d_i2c_remove(struct i2c_client *client)
 #ifdef CONFIG_PM_SLEEP
 static int lis3lv02d_i2c_suspend(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
 	if (!lis3->pdata || !lis3->pdata->wakeup_flags)
@@ -219,7 +219,7 @@ static int lis3lv02d_i2c_suspend(struct device *dev)
 
 static int lis3lv02d_i2c_resume(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
 	/*
@@ -238,7 +238,7 @@ static int lis3lv02d_i2c_resume(struct device *dev)
 #ifdef CONFIG_PM
 static int lis3_i2c_runtime_suspend(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
 	lis3lv02d_poweroff(lis3);
@@ -247,7 +247,7 @@ static int lis3_i2c_runtime_suspend(struct device *dev)
 
 static int lis3_i2c_runtime_resume(struct device *dev)
 {
-	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
 	lis3lv02d_poweron(lis3);

+ 120 - 4
drivers/misc/lkdtm.c

@@ -92,6 +92,9 @@ enum ctype {
 	CT_UNALIGNED_LOAD_STORE_WRITE,
 	CT_OVERWRITE_ALLOCATION,
 	CT_WRITE_AFTER_FREE,
+	CT_READ_AFTER_FREE,
+	CT_WRITE_BUDDY_AFTER_FREE,
+	CT_READ_BUDDY_AFTER_FREE,
 	CT_SOFTLOCKUP,
 	CT_HARDLOCKUP,
 	CT_SPINLOCKUP,
@@ -105,6 +108,7 @@ enum ctype {
 	CT_WRITE_RO,
 	CT_WRITE_RO_AFTER_INIT,
 	CT_WRITE_KERN,
+	CT_WRAP_ATOMIC
 };
 
 static char* cp_name[] = {
@@ -130,6 +134,9 @@ static char* cp_type[] = {
 	"UNALIGNED_LOAD_STORE_WRITE",
 	"OVERWRITE_ALLOCATION",
 	"WRITE_AFTER_FREE",
+	"READ_AFTER_FREE",
+	"WRITE_BUDDY_AFTER_FREE",
+	"READ_BUDDY_AFTER_FREE",
 	"SOFTLOCKUP",
 	"HARDLOCKUP",
 	"SPINLOCKUP",
@@ -143,6 +150,7 @@ static char* cp_type[] = {
 	"WRITE_RO",
 	"WRITE_RO_AFTER_INIT",
 	"WRITE_KERN",
+	"WRAP_ATOMIC"
 };
 
 static struct jprobe lkdtm;
@@ -338,7 +346,7 @@ static noinline void corrupt_stack(void)
 	memset((void *)data, 0, 64);
 }
 
-static void execute_location(void *dst)
+static void noinline execute_location(void *dst)
 {
 	void (*func)(void) = dst;
 
@@ -412,12 +420,109 @@ static void lkdtm_do_action(enum ctype which)
 		break;
 	}
 	case CT_WRITE_AFTER_FREE: {
+		int *base, *again;
 		size_t len = 1024;
-		u32 *data = kmalloc(len, GFP_KERNEL);
+		/*
+		 * The slub allocator uses the first word to store the free
+		 * pointer in some configurations. Use the middle of the
+		 * allocation to avoid running into the freelist
+		 */
+		size_t offset = (len / sizeof(*base)) / 2;
+
+		base = kmalloc(len, GFP_KERNEL);
+		pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
+		pr_info("Attempting bad write to freed memory at %p\n",
+			&base[offset]);
+		kfree(base);
+		base[offset] = 0x0abcdef0;
+		/* Attempt to notice the overwrite. */
+		again = kmalloc(len, GFP_KERNEL);
+		kfree(again);
+		if (again != base)
+			pr_info("Hmm, didn't get the same memory range.\n");
 
-		kfree(data);
+		break;
+	}
+	case CT_READ_AFTER_FREE: {
+		int *base, *val, saw;
+		size_t len = 1024;
+		/*
+		 * The slub allocator uses the first word to store the free
+		 * pointer in some configurations. Use the middle of the
+		 * allocation to avoid running into the freelist
+		 */
+		size_t offset = (len / sizeof(*base)) / 2;
+
+		base = kmalloc(len, GFP_KERNEL);
+		if (!base)
+			break;
+
+		val = kmalloc(len, GFP_KERNEL);
+		if (!val)
+			break;
+
+		*val = 0x12345678;
+		base[offset] = *val;
+		pr_info("Value in memory before free: %x\n", base[offset]);
+
+		kfree(base);
+
+		pr_info("Attempting bad read from freed memory\n");
+		saw = base[offset];
+		if (saw != *val) {
+			/* Good! Poisoning happened, so declare a win. */
+			pr_info("Memory correctly poisoned (%x)\n", saw);
+			BUG();
+		}
+		pr_info("Memory was not poisoned\n");
+
+		kfree(val);
+		break;
+	}
+	case CT_WRITE_BUDDY_AFTER_FREE: {
+		unsigned long p = __get_free_page(GFP_KERNEL);
+		if (!p)
+			break;
+		pr_info("Writing to the buddy page before free\n");
+		memset((void *)p, 0x3, PAGE_SIZE);
+		free_page(p);
 		schedule();
-		memset(data, 0x78, len);
+		pr_info("Attempting bad write to the buddy page after free\n");
+		memset((void *)p, 0x78, PAGE_SIZE);
+		/* Attempt to notice the overwrite. */
+		p = __get_free_page(GFP_KERNEL);
+		free_page(p);
+		schedule();
+
+		break;
+	}
+	case CT_READ_BUDDY_AFTER_FREE: {
+		unsigned long p = __get_free_page(GFP_KERNEL);
+		int saw, *val = kmalloc(1024, GFP_KERNEL);
+		int *base;
+
+		if (!p)
+			break;
+
+		if (!val)
+			break;
+
+		base = (int *)p;
+
+		*val = 0x12345678;
+		base[0] = *val;
+		pr_info("Value in memory before free: %x\n", base[0]);
+		free_page(p);
+		pr_info("Attempting to read from freed memory\n");
+		saw = base[0];
+		if (saw != *val) {
+			/* Good! Poisoning happened, so declare a win. */
+			pr_info("Memory correctly poisoned (%x)\n", saw);
+			BUG();
+		}
+		pr_info("Buddy page was not poisoned\n");
+
+		kfree(val);
 		break;
 	}
 	case CT_SOFTLOCKUP:
@@ -548,6 +653,17 @@ static void lkdtm_do_action(enum ctype which)
 		do_overwritten();
 		break;
 	}
+	case CT_WRAP_ATOMIC: {
+		atomic_t under = ATOMIC_INIT(INT_MIN);
+		atomic_t over = ATOMIC_INIT(INT_MAX);
+
+		pr_info("attempting atomic underflow\n");
+		atomic_dec(&under);
+		pr_info("attempting atomic overflow\n");
+		atomic_inc(&over);
+
+		return;
+	}
 	case CT_NONE:
 	default:
 		break;

+ 3 - 3
drivers/misc/mei/Kconfig

@@ -1,6 +1,6 @@
 config INTEL_MEI
 	tristate "Intel Management Engine Interface"
-	depends on X86 && PCI && WATCHDOG_CORE
+	depends on X86 && PCI
 	help
 	  The Intel Management Engine (Intel ME) provides Manageability,
 	  Security and Media services for system containing Intel chipsets.
@@ -12,7 +12,7 @@ config INTEL_MEI
 config INTEL_MEI_ME
 	tristate "ME Enabled Intel Chipsets"
 	select INTEL_MEI
-	depends on X86 && PCI && WATCHDOG_CORE
+	depends on X86 && PCI
 	help
 	  MEI support for ME Enabled Intel chipsets.
 
@@ -37,7 +37,7 @@ config INTEL_MEI_ME
 config INTEL_MEI_TXE
 	tristate "Intel Trusted Execution Environment with ME Interface"
 	select INTEL_MEI
-	depends on X86 && PCI && WATCHDOG_CORE
+	depends on X86 && PCI
 	help
 	  MEI Support for Trusted Execution Environment device on Intel SoCs
 

+ 0 - 1
drivers/misc/mei/Makefile

@@ -9,7 +9,6 @@ mei-objs += interrupt.o
 mei-objs += client.o
 mei-objs += main.o
 mei-objs += amthif.o
-mei-objs += wd.o
 mei-objs += bus.o
 mei-objs += bus-fixup.o
 mei-$(CONFIG_DEBUG_FS) += debugfs.o

+ 46 - 84
drivers/misc/mei/amthif.c

@@ -50,7 +50,6 @@ void mei_amthif_reset_params(struct mei_device *dev)
 	dev->iamthif_current_cb = NULL;
 	dev->iamthif_canceled = false;
 	dev->iamthif_state = MEI_IAMTHIF_IDLE;
-	dev->iamthif_timer = 0;
 	dev->iamthif_stall_timer = 0;
 	dev->iamthif_open_count = 0;
 }
@@ -68,11 +67,14 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
 	struct mei_cl *cl = &dev->iamthif_cl;
 	int ret;
 
+	if (mei_cl_is_connected(cl))
+		return 0;
+
 	dev->iamthif_state = MEI_IAMTHIF_IDLE;
 
 	mei_cl_init(cl, dev);
 
-	ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
+	ret = mei_cl_link(cl);
 	if (ret < 0) {
 		dev_err(dev->dev, "amthif: failed cl_link %d\n", ret);
 		return ret;
@@ -80,31 +82,9 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
 
 	ret = mei_cl_connect(cl, me_cl, NULL);
 
-	dev->iamthif_state = MEI_IAMTHIF_IDLE;
-
 	return ret;
 }
 
-/**
- * mei_amthif_find_read_list_entry - finds a amthilist entry for current file
- *
- * @dev: the device structure
- * @file: pointer to file object
- *
- * Return:   returned a list entry on success, NULL on failure.
- */
-struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
-						struct file *file)
-{
-	struct mei_cl_cb *cb;
-
-	list_for_each_entry(cb, &dev->amthif_rd_complete_list.list, list)
-		if (cb->file_object == file)
-			return cb;
-	return NULL;
-}
-
-
 /**
  * mei_amthif_read - read data from AMTHIF client
  *
@@ -126,18 +106,11 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
 {
 	struct mei_cl *cl = file->private_data;
 	struct mei_cl_cb *cb;
-	unsigned long timeout;
 	int rets;
 	int wait_ret;
 
-	/* Only possible if we are in timeout */
-	if (!cl) {
-		dev_err(dev->dev, "bad file ext.\n");
-		return -ETIME;
-	}
-
 	dev_dbg(dev->dev, "checking amthif data\n");
-	cb = mei_amthif_find_read_list_entry(dev, file);
+	cb = mei_cl_read_cb(cl, file);
 
 	/* Check for if we can block or not*/
 	if (cb == NULL && file->f_flags & O_NONBLOCK)
@@ -149,8 +122,9 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
 		/* unlock the Mutex */
 		mutex_unlock(&dev->device_lock);
 
-		wait_ret = wait_event_interruptible(dev->iamthif_cl.wait,
-			(cb = mei_amthif_find_read_list_entry(dev, file)));
+		wait_ret = wait_event_interruptible(cl->rx_wait,
+					!list_empty(&cl->rd_completed) ||
+					!mei_cl_is_connected(cl));
 
 		/* Locking again the Mutex */
 		mutex_lock(&dev->device_lock);
@@ -158,7 +132,12 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
 		if (wait_ret)
 			return -ERESTARTSYS;
 
-		dev_dbg(dev->dev, "woke up from sleep\n");
+		if (!mei_cl_is_connected(cl)) {
+			rets = -EBUSY;
+			goto out;
+		}
+
+		cb = mei_cl_read_cb(cl, file);
 	}
 
 	if (cb->status) {
@@ -168,24 +147,10 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
 	}
 
 	dev_dbg(dev->dev, "Got amthif data\n");
-	dev->iamthif_timer = 0;
-
-	timeout = cb->read_time +
-		mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
-	dev_dbg(dev->dev, "amthif timeout = %lud\n",
-			timeout);
-
-	if  (time_after(jiffies, timeout)) {
-		dev_dbg(dev->dev, "amthif Time out\n");
-		/* 15 sec for the message has expired */
-		list_del_init(&cb->list);
-		rets = -ETIME;
-		goto free;
-	}
 	/* if the whole message will fit remove it from the list */
 	if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset))
 		list_del_init(&cb->list);
-	else if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
+	else if (cb->buf_idx <= *offset) {
 		/* end of the message has been reached */
 		list_del_init(&cb->list);
 		rets = 0;
@@ -195,9 +160,8 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
 		 * remove message from deletion list
 		 */
 
-	dev_dbg(dev->dev, "amthif cb->buf size - %d\n",
-	    cb->buf.size);
-	dev_dbg(dev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
+	dev_dbg(dev->dev, "amthif cb->buf.size - %zu cb->buf_idx - %zu\n",
+		cb->buf.size, cb->buf_idx);
 
 	/* length is being truncated to PAGE_SIZE, however,
 	 * the buf_idx may point beyond */
@@ -229,7 +193,7 @@ out:
  *
  * Return: 0 on success, <0 on failure.
  */
-static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
+static int mei_amthif_read_start(struct mei_cl *cl, const struct file *file)
 {
 	struct mei_device *dev = cl->dev;
 	struct mei_cl_cb *cb;
@@ -248,7 +212,7 @@ static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
 	list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
 
 	dev->iamthif_state = MEI_IAMTHIF_READING;
-	dev->iamthif_file_object = cb->file_object;
+	dev->iamthif_fp = cb->fp;
 	dev->iamthif_current_cb = cb;
 
 	return 0;
@@ -277,7 +241,7 @@ static int mei_amthif_send_cmd(struct mei_cl *cl, struct mei_cl_cb *cb)
 
 	dev->iamthif_state = MEI_IAMTHIF_WRITING;
 	dev->iamthif_current_cb = cb;
-	dev->iamthif_file_object = cb->file_object;
+	dev->iamthif_fp = cb->fp;
 	dev->iamthif_canceled = false;
 
 	ret = mei_cl_write(cl, cb, false);
@@ -285,7 +249,7 @@ static int mei_amthif_send_cmd(struct mei_cl *cl, struct mei_cl_cb *cb)
 		return ret;
 
 	if (cb->completed)
-		cb->status = mei_amthif_read_start(cl, cb->file_object);
+		cb->status = mei_amthif_read_start(cl, cb->fp);
 
 	return 0;
 }
@@ -304,8 +268,7 @@ int mei_amthif_run_next_cmd(struct mei_device *dev)
 
 	dev->iamthif_canceled = false;
 	dev->iamthif_state = MEI_IAMTHIF_IDLE;
-	dev->iamthif_timer = 0;
-	dev->iamthif_file_object = NULL;
+	dev->iamthif_fp = NULL;
 
 	dev_dbg(dev->dev, "complete amthif cmd_list cb.\n");
 
@@ -329,17 +292,17 @@ int mei_amthif_run_next_cmd(struct mei_device *dev)
 int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb)
 {
 
-	struct mei_device *dev;
-
-	if (WARN_ON(!cl || !cl->dev))
-		return -ENODEV;
+	struct mei_device *dev = cl->dev;
 
-	if (WARN_ON(!cb))
-		return -EINVAL;
+	list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
 
-	dev = cl->dev;
+	/*
+	 * The previous request is still in processing, queue this one.
+	 */
+	if (dev->iamthif_state > MEI_IAMTHIF_IDLE &&
+	    dev->iamthif_state < MEI_IAMTHIF_READ_COMPLETE)
+		return 0;
 
-	list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
 	return mei_amthif_run_next_cmd(dev);
 }
 
@@ -360,10 +323,10 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
 {
 	unsigned int mask = 0;
 
-	poll_wait(file, &dev->iamthif_cl.wait, wait);
+	poll_wait(file, &dev->iamthif_cl.rx_wait, wait);
 
 	if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
-	    dev->iamthif_file_object == file) {
+	    dev->iamthif_fp == file) {
 
 		mask |= POLLIN | POLLRDNORM;
 		mei_amthif_run_next_cmd(dev);
@@ -393,7 +356,7 @@ int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
 		return ret;
 
 	if (cb->completed)
-		cb->status = mei_amthif_read_start(cl, cb->file_object);
+		cb->status = mei_amthif_read_start(cl, cb->fp);
 
 	return 0;
 }
@@ -437,11 +400,12 @@ int mei_amthif_irq_read_msg(struct mei_cl *cl,
 /**
  * mei_amthif_complete - complete amthif callback.
  *
- * @dev: the device structure.
+ * @cl: host client
  * @cb: callback block.
  */
-void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
+void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
 {
+	struct mei_device *dev = cl->dev;
 
 	if (cb->fop_type == MEI_FOP_WRITE) {
 		if (!cb->status) {
@@ -453,25 +417,22 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
 		 * in case of error enqueue the write cb to complete read list
 		 * so it can be propagated to the reader
 		 */
-		list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
-		wake_up_interruptible(&dev->iamthif_cl.wait);
+		list_add_tail(&cb->list, &cl->rd_completed);
+		wake_up_interruptible(&cl->rx_wait);
 		return;
 	}
 
 	if (!dev->iamthif_canceled) {
 		dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE;
 		dev->iamthif_stall_timer = 0;
-		list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
+		list_add_tail(&cb->list, &cl->rd_completed);
 		dev_dbg(dev->dev, "amthif read completed\n");
-		dev->iamthif_timer = jiffies;
-		dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n",
-			dev->iamthif_timer);
 	} else {
 		mei_amthif_run_next_cmd(dev);
 	}
 
 	dev_dbg(dev->dev, "completing amthif call back.\n");
-	wake_up_interruptible(&dev->iamthif_cl.wait);
+	wake_up_interruptible(&cl->rx_wait);
 }
 
 /**
@@ -497,7 +458,7 @@ static bool mei_clear_list(struct mei_device *dev,
 	/* list all list member */
 	list_for_each_entry_safe(cb, next, mei_cb_list, list) {
 		/* check if list member associated with a file */
-		if (file == cb->file_object) {
+		if (file == cb->fp) {
 			/* check if cb equal to current iamthif cb */
 			if (dev->iamthif_current_cb == cb) {
 				dev->iamthif_current_cb = NULL;
@@ -523,13 +484,14 @@ static bool mei_clear_list(struct mei_device *dev,
  *
  * Return: true if callback removed from the list, false otherwise
  */
-static bool mei_clear_lists(struct mei_device *dev, struct file *file)
+static bool mei_clear_lists(struct mei_device *dev, const struct file *file)
 {
 	bool removed = false;
+	struct mei_cl *cl = &dev->iamthif_cl;
 
 	/* remove callbacks associated with a file */
 	mei_clear_list(dev, file, &dev->amthif_cmd_list.list);
-	if (mei_clear_list(dev, file, &dev->amthif_rd_complete_list.list))
+	if (mei_clear_list(dev, file, &cl->rd_completed))
 		removed = true;
 
 	mei_clear_list(dev, file, &dev->ctrl_rd_list.list);
@@ -546,7 +508,7 @@ static bool mei_clear_lists(struct mei_device *dev, struct file *file)
 	/* check if iamthif_current_cb not NULL */
 	if (dev->iamthif_current_cb && !removed) {
 		/* check file and iamthif current cb association */
-		if (dev->iamthif_current_cb->file_object == file) {
+		if (dev->iamthif_current_cb->fp == file) {
 			/* remove cb */
 			mei_io_cb_free(dev->iamthif_current_cb);
 			dev->iamthif_current_cb = NULL;
@@ -569,7 +531,7 @@ int mei_amthif_release(struct mei_device *dev, struct file *file)
 	if (dev->iamthif_open_count > 0)
 		dev->iamthif_open_count--;
 
-	if (dev->iamthif_file_object == file &&
+	if (dev->iamthif_fp == file &&
 	    dev->iamthif_state != MEI_IAMTHIF_IDLE) {
 
 		dev_dbg(dev->dev, "amthif canceled iamthif state %d\n",

+ 34 - 7
drivers/misc/mei/bus-fixup.c

@@ -35,6 +35,9 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
 #define MEI_UUID_NFC_HCI UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \
 			0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
 
+#define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \
+			    0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB)
+
 #define MEI_UUID_ANY NULL_UUID_LE
 
 /**
@@ -48,8 +51,7 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
  */
 static void number_of_connections(struct mei_cl_device *cldev)
 {
-	dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
-			__func__, mei_me_cl_uuid(cldev->me_cl));
+	dev_dbg(&cldev->dev, "running hook %s\n", __func__);
 
 	if (cldev->me_cl->props.max_number_of_connections > 1)
 		cldev->do_match = 0;
@@ -62,11 +64,36 @@ static void number_of_connections(struct mei_cl_device *cldev)
  */
 static void blacklist(struct mei_cl_device *cldev)
 {
-	dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
-			__func__, mei_me_cl_uuid(cldev->me_cl));
+	dev_dbg(&cldev->dev, "running hook %s\n", __func__);
+
 	cldev->do_match = 0;
 }
 
+/**
+ * mei_wd - wd client on the bus, change protocol version
+ *   as the API has changed.
+ *
+ * @cldev: me clients device
+ */
+#if IS_ENABLED(CONFIG_INTEL_MEI_ME)
+#include <linux/pci.h>
+#include "hw-me-regs.h"
+static void mei_wd(struct mei_cl_device *cldev)
+{
+	struct pci_dev *pdev = to_pci_dev(cldev->dev.parent);
+
+	dev_dbg(&cldev->dev, "running hook %s\n", __func__);
+	if (pdev->device == MEI_DEV_ID_WPT_LP ||
+	    pdev->device == MEI_DEV_ID_SPT ||
+	    pdev->device == MEI_DEV_ID_SPT_H)
+		cldev->me_cl->props.protocol_version = 0x2;
+
+	cldev->do_match = 1;
+}
+#else
+static inline void mei_wd(struct mei_cl_device *cldev) {}
+#endif /* CONFIG_INTEL_MEI_ME */
+
 struct mei_nfc_cmd {
 	u8 command;
 	u8 status;
@@ -208,12 +235,11 @@ static void mei_nfc(struct mei_cl_device *cldev)
 
 	bus = cldev->bus;
 
-	dev_dbg(bus->dev, "running hook %s: %pUl match=%d\n",
-		__func__, mei_me_cl_uuid(cldev->me_cl), cldev->do_match);
+	dev_dbg(&cldev->dev, "running hook %s\n", __func__);
 
 	mutex_lock(&bus->device_lock);
 	/* we need to connect to INFO GUID */
-	cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY);
+	cl = mei_cl_alloc_linked(bus);
 	if (IS_ERR(cl)) {
 		ret = PTR_ERR(cl);
 		cl = NULL;
@@ -282,6 +308,7 @@ static struct mei_fixup {
 	MEI_FIXUP(MEI_UUID_ANY, number_of_connections),
 	MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist),
 	MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
+	MEI_FIXUP(MEI_UUID_WD, mei_wd),
 };
 
 /**

+ 45 - 12
drivers/misc/mei/bus.c

@@ -44,7 +44,7 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
 			bool blocking)
 {
 	struct mei_device *bus;
-	struct mei_cl_cb *cb = NULL;
+	struct mei_cl_cb *cb;
 	ssize_t rets;
 
 	if (WARN_ON(!cl || !cl->dev))
@@ -53,6 +53,11 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
 	bus = cl->dev;
 
 	mutex_lock(&bus->device_lock);
+	if (bus->dev_state != MEI_DEV_ENABLED) {
+		rets = -ENODEV;
+		goto out;
+	}
+
 	if (!mei_cl_is_connected(cl)) {
 		rets = -ENODEV;
 		goto out;
@@ -81,8 +86,6 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
 
 out:
 	mutex_unlock(&bus->device_lock);
-	if (rets < 0)
-		mei_io_cb_free(cb);
 
 	return rets;
 }
@@ -109,6 +112,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
 	bus = cl->dev;
 
 	mutex_lock(&bus->device_lock);
+	if (bus->dev_state != MEI_DEV_ENABLED) {
+		rets = -ENODEV;
+		goto out;
+	}
 
 	cb = mei_cl_read_cb(cl, NULL);
 	if (cb)
@@ -230,45 +237,55 @@ static void mei_cl_bus_event_work(struct work_struct *work)
  * mei_cl_bus_notify_event - schedule notify cb on bus client
  *
  * @cl: host client
+ *
+ * Return: true if event was scheduled
+ *         false if the client is not waiting for event
  */
-void mei_cl_bus_notify_event(struct mei_cl *cl)
+bool mei_cl_bus_notify_event(struct mei_cl *cl)
 {
 	struct mei_cl_device *cldev = cl->cldev;
 
 	if (!cldev || !cldev->event_cb)
-		return;
+		return false;
 
 	if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)))
-		return;
+		return false;
 
 	if (!cl->notify_ev)
-		return;
+		return false;
 
 	set_bit(MEI_CL_EVENT_NOTIF, &cldev->events);
 
 	schedule_work(&cldev->event_work);
 
 	cl->notify_ev = false;
+
+	return true;
 }
 
 /**
- * mei_cl_bus_rx_event  - schedule rx evenet
+ * mei_cl_bus_rx_event  - schedule rx event
  *
  * @cl: host client
+ *
+ * Return: true if event was scheduled
+ *         false if the client is not waiting for event
  */
-void mei_cl_bus_rx_event(struct mei_cl *cl)
+bool mei_cl_bus_rx_event(struct mei_cl *cl)
 {
 	struct mei_cl_device *cldev = cl->cldev;
 
 	if (!cldev || !cldev->event_cb)
-		return;
+		return false;
 
 	if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX)))
-		return;
+		return false;
 
 	set_bit(MEI_CL_EVENT_RX, &cldev->events);
 
 	schedule_work(&cldev->event_work);
+
+	return true;
 }
 
 /**
@@ -398,7 +415,7 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
 
 	if (!cl) {
 		mutex_lock(&bus->device_lock);
-		cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY);
+		cl = mei_cl_alloc_linked(bus);
 		mutex_unlock(&bus->device_lock);
 		if (IS_ERR(cl))
 			return PTR_ERR(cl);
@@ -958,6 +975,22 @@ void mei_cl_bus_rescan(struct mei_device *bus)
 	dev_dbg(bus->dev, "rescan end");
 }
 
+void mei_cl_bus_rescan_work(struct work_struct *work)
+{
+	struct mei_device *bus =
+		container_of(work, struct mei_device, bus_rescan_work);
+	struct mei_me_client *me_cl;
+
+	mutex_lock(&bus->device_lock);
+	me_cl = mei_me_cl_by_uuid(bus, &mei_amthif_guid);
+	if (me_cl)
+		mei_amthif_host_init(bus, me_cl);
+	mei_me_cl_put(me_cl);
+	mutex_unlock(&bus->device_lock);
+
+	mei_cl_bus_rescan(bus);
+}
+
 int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
 				struct module *owner)
 {

+ 90 - 99
drivers/misc/mei/client.c

@@ -359,7 +359,7 @@ void mei_io_cb_free(struct mei_cl_cb *cb)
  * Return: mei_cl_cb pointer or NULL;
  */
 struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
-				 struct file *fp)
+				 const struct file *fp)
 {
 	struct mei_cl_cb *cb;
 
@@ -368,7 +368,7 @@ struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
 		return NULL;
 
 	INIT_LIST_HEAD(&cb->list);
-	cb->file_object = fp;
+	cb->fp = fp;
 	cb->cl = cl;
 	cb->buf_idx = 0;
 	cb->fop_type = type;
@@ -455,7 +455,8 @@ int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length)
  * Return: cb on success and NULL on failure
  */
 struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
-				  enum mei_cb_file_ops type, struct file *fp)
+				  enum mei_cb_file_ops type,
+				  const struct file *fp)
 {
 	struct mei_cl_cb *cb;
 
@@ -485,7 +486,7 @@ struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
 	struct mei_cl_cb *cb;
 
 	list_for_each_entry(cb, &cl->rd_completed, list)
-		if (!fp || fp == cb->file_object)
+		if (!fp || fp == cb->fp)
 			return cb;
 
 	return NULL;
@@ -503,12 +504,12 @@ void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp)
 	struct mei_cl_cb *cb, *next;
 
 	list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
-		if (!fp || fp == cb->file_object)
+		if (!fp || fp == cb->fp)
 			mei_io_cb_free(cb);
 
 
 	list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
-		if (!fp || fp == cb->file_object)
+		if (!fp || fp == cb->fp)
 			mei_io_cb_free(cb);
 }
 
@@ -535,7 +536,6 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
 	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
 	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
 	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
-	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
 
 	mei_cl_read_cb_flush(cl, fp);
 
@@ -587,27 +587,23 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
  * mei_cl_link - allocate host id in the host map
  *
  * @cl: host client
- * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
  *
  * Return: 0 on success
  *	-EINVAL on incorrect values
  *	-EMFILE if open count exceeded.
  */
-int mei_cl_link(struct mei_cl *cl, int id)
+int mei_cl_link(struct mei_cl *cl)
 {
 	struct mei_device *dev;
 	long open_handle_count;
+	int id;
 
 	if (WARN_ON(!cl || !cl->dev))
 		return -EINVAL;
 
 	dev = cl->dev;
 
-	/* If Id is not assigned get one*/
-	if (id == MEI_HOST_CLIENT_ID_ANY)
-		id = find_first_zero_bit(dev->host_clients_map,
-					MEI_CLIENTS_MAX);
-
+	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
 	if (id >= MEI_CLIENTS_MAX) {
 		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
 		return -EMFILE;
@@ -648,7 +644,7 @@ int mei_cl_unlink(struct mei_cl *cl)
 	if (!cl)
 		return 0;
 
-	/* wd and amthif might not be initialized */
+	/* amthif might not be initialized */
 	if (!cl->dev)
 		return 0;
 
@@ -670,31 +666,12 @@ int mei_cl_unlink(struct mei_cl *cl)
 	return 0;
 }
 
-
-void mei_host_client_init(struct work_struct *work)
+void mei_host_client_init(struct mei_device *dev)
 {
-	struct mei_device *dev =
-		container_of(work, struct mei_device, init_work);
-	struct mei_me_client *me_cl;
-
-	mutex_lock(&dev->device_lock);
-
-
-	me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
-	if (me_cl)
-		mei_amthif_host_init(dev, me_cl);
-	mei_me_cl_put(me_cl);
-
-	me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid);
-	if (me_cl)
-		mei_wd_host_init(dev, me_cl);
-	mei_me_cl_put(me_cl);
-
 	dev->dev_state = MEI_DEV_ENABLED;
 	dev->reset_count = 0;
-	mutex_unlock(&dev->device_lock);
 
-	mei_cl_bus_rescan(dev);
+	schedule_work(&dev->bus_rescan_work);
 
 	pm_runtime_mark_last_busy(dev->dev);
 	dev_dbg(dev->dev, "rpm: autosuspend\n");
@@ -725,6 +702,33 @@ bool mei_hbuf_acquire(struct mei_device *dev)
 	return true;
 }
 
+/**
+ * mei_cl_wake_all - wake up readers, writers and event waiters so
+ *                 they can be interrupted
+ *
+ * @cl: host client
+ */
+static void mei_cl_wake_all(struct mei_cl *cl)
+{
+	struct mei_device *dev = cl->dev;
+
+	/* synchronized under device mutex */
+	if (waitqueue_active(&cl->rx_wait)) {
+		cl_dbg(dev, cl, "Waking up reading client!\n");
+		wake_up_interruptible(&cl->rx_wait);
+	}
+	/* synchronized under device mutex */
+	if (waitqueue_active(&cl->tx_wait)) {
+		cl_dbg(dev, cl, "Waking up writing client!\n");
+		wake_up_interruptible(&cl->tx_wait);
+	}
+	/* synchronized under device mutex */
+	if (waitqueue_active(&cl->ev_wait)) {
+		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
+		wake_up_interruptible(&cl->ev_wait);
+	}
+}
+
 /**
  * mei_cl_set_disconnected - set disconnected state and clear
  *   associated states and resources
@@ -740,8 +744,11 @@ void mei_cl_set_disconnected(struct mei_cl *cl)
 		return;
 
 	cl->state = MEI_FILE_DISCONNECTED;
+	mei_io_list_free(&dev->write_list, cl);
+	mei_io_list_free(&dev->write_waiting_list, cl);
 	mei_io_list_flush(&dev->ctrl_rd_list, cl);
 	mei_io_list_flush(&dev->ctrl_wr_list, cl);
+	mei_cl_wake_all(cl);
 	cl->mei_flow_ctrl_creds = 0;
 	cl->timer_count = 0;
 
@@ -1034,7 +1041,7 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
  * Return: 0 on success, <0 on failure.
  */
 int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
-		   struct file *file)
+		  const struct file *file)
 {
 	struct mei_device *dev;
 	struct mei_cl_cb *cb;
@@ -1119,11 +1126,10 @@ nortpm:
  * mei_cl_alloc_linked - allocate and link host client
  *
  * @dev: the device structure
- * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
  *
  * Return: cl on success ERR_PTR on failure
  */
-struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id)
+struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
 {
 	struct mei_cl *cl;
 	int ret;
@@ -1134,7 +1140,7 @@ struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id)
 		goto err;
 	}
 
-	ret = mei_cl_link(cl, id);
+	ret = mei_cl_link(cl);
 	if (ret)
 		goto err;
 
@@ -1149,11 +1155,12 @@ err:
 /**
  * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
  *
- * @cl: private data of the file object
+ * @cl: host client
+ * @fp: the file pointer associated with the host client
  *
  * Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
  */
-int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
+static int mei_cl_flow_ctrl_creds(struct mei_cl *cl, const struct file *fp)
 {
 	int rets;
 
@@ -1164,7 +1171,7 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
 		return 1;
 
 	if (mei_cl_is_fixed_address(cl)) {
-		rets = mei_cl_read_start(cl, mei_cl_mtu(cl), NULL);
+		rets = mei_cl_read_start(cl, mei_cl_mtu(cl), fp);
 		if (rets && rets != -EBUSY)
 			return rets;
 		return 1;
@@ -1186,7 +1193,7 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
  *	0 on success
  *	-EINVAL when ctrl credits are <= 0
  */
-int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
+static int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
 {
 	if (WARN_ON(!cl || !cl->me_cl))
 		return -EINVAL;
@@ -1283,7 +1290,8 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
  *
  * Return: 0 on success and error otherwise.
  */
-int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request)
+int mei_cl_notify_request(struct mei_cl *cl,
+			  const struct file *file, u8 request)
 {
 	struct mei_device *dev;
 	struct mei_cl_cb *cb;
@@ -1368,12 +1376,12 @@ void mei_cl_notify(struct mei_cl *cl)
 
 	cl_dbg(dev, cl, "notify event");
 	cl->notify_ev = true;
-	wake_up_interruptible_all(&cl->ev_wait);
+	if (!mei_cl_bus_notify_event(cl))
+		wake_up_interruptible(&cl->ev_wait);
 
 	if (cl->ev_async)
 		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
 
-	mei_cl_bus_notify_event(cl);
 }
 
 /**
@@ -1421,6 +1429,25 @@ out:
 	return 0;
 }
 
+/**
+ * mei_cl_is_read_fc_cb - check if read cb is waiting for flow control
+ *                        for given host client
+ *
+ * @cl: host client
+ *
+ * Return: true if at least one such cb was found.
+ */
+static bool mei_cl_is_read_fc_cb(struct mei_cl *cl)
+{
+	struct mei_device *dev = cl->dev;
+	struct mei_cl_cb *cb;
+
+	list_for_each_entry(cb, &dev->ctrl_wr_list.list, list)
+		if (cb->fop_type == MEI_FOP_READ && cb->cl == cl)
+			return true;
+	return false;
+}
+
 /**
  * mei_cl_read_start - the start read client message function.
  *
@@ -1430,7 +1457,7 @@ out:
  *
  * Return: 0 on success, <0 on failure.
  */
-int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
+int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
 {
 	struct mei_device *dev;
 	struct mei_cl_cb *cb;
@@ -1445,7 +1472,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
 		return -ENODEV;
 
 	/* HW currently supports only one pending read */
-	if (!list_empty(&cl->rd_pending))
+	if (!list_empty(&cl->rd_pending) || mei_cl_is_read_fc_cb(cl))
 		return -EBUSY;
 
 	if (!mei_me_cl_is_active(cl->me_cl)) {
@@ -1524,7 +1551,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
 
 	first_chunk = cb->buf_idx == 0;
 
-	rets = first_chunk ? mei_cl_flow_ctrl_creds(cl) : 1;
+	rets = first_chunk ? mei_cl_flow_ctrl_creds(cl, cb->fp) : 1;
 	if (rets < 0)
 		return rets;
 
@@ -1556,7 +1583,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
 		return 0;
 	}
 
-	cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
+	cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n",
 			cb->buf.size, cb->buf_idx);
 
 	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
@@ -1618,7 +1645,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
 	if (rets < 0 && rets != -EINPROGRESS) {
 		pm_runtime_put_noidle(dev->dev);
 		cl_err(dev, cl, "rpm: get failed %d\n", rets);
-		return rets;
+		goto free;
 	}
 
 	cb->buf_idx = 0;
@@ -1630,7 +1657,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
 	mei_hdr.msg_complete = 0;
 	mei_hdr.internal = cb->internal;
 
-	rets = mei_cl_flow_ctrl_creds(cl);
+	rets = mei_cl_flow_ctrl_creds(cl, cb->fp);
 	if (rets < 0)
 		goto err;
 
@@ -1677,7 +1704,8 @@ out:
 
 		mutex_unlock(&dev->device_lock);
 		rets = wait_event_interruptible(cl->tx_wait,
-				cl->writing_state == MEI_WRITE_COMPLETE);
+				cl->writing_state == MEI_WRITE_COMPLETE ||
+				(!mei_cl_is_connected(cl)));
 		mutex_lock(&dev->device_lock);
 		/* wait_event_interruptible returns -ERESTARTSYS */
 		if (rets) {
@@ -1685,6 +1713,10 @@ out:
 				rets = -EINTR;
 			goto err;
 		}
+		if (cl->writing_state != MEI_WRITE_COMPLETE) {
+			rets = -EFAULT;
+			goto err;
+		}
 	}
 
 	rets = size;
@@ -1692,6 +1724,8 @@ err:
 	cl_dbg(dev, cl, "rpm: autosuspend\n");
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
+free:
+	mei_io_cb_free(cb);
 
 	return rets;
 }
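
Editorial note: the blocking-write path above now also wakes up on disconnect, treats a wakeup without MEI_WRITE_COMPLETE as a failure, and the new free: label releases the callback when runtime PM resume fails. A stripped-down sketch of the wait logic, with illustrative demo_* names:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/wait.h>

enum demo_write_state { DEMO_IDLE, DEMO_WRITING, DEMO_WRITE_COMPLETE };

struct demo_wr_cl {
	wait_queue_head_t tx_wait;
	enum demo_write_state writing_state;
	bool connected;
};

/* called with the device lock dropped, as mei_cl_write() drops it around the wait */
static int demo_wait_write_complete(struct demo_wr_cl *cl)
{
	int ret;

	ret = wait_event_interruptible(cl->tx_wait,
			cl->writing_state == DEMO_WRITE_COMPLETE ||
			!cl->connected);
	if (ret)			/* -ERESTARTSYS: interrupted by a signal */
		return -EINTR;
	if (cl->writing_state != DEMO_WRITE_COMPLETE)
		return -EFAULT;		/* woken by disconnect, not completion */
	return 0;
}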
@@ -1721,10 +1755,8 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
 
 	case MEI_FOP_READ:
 		list_add_tail(&cb->list, &cl->rd_completed);
-		if (waitqueue_active(&cl->rx_wait))
-			wake_up_interruptible_all(&cl->rx_wait);
-		else
-			mei_cl_bus_rx_event(cl);
+		if (!mei_cl_bus_rx_event(cl))
+			wake_up_interruptible(&cl->rx_wait);
 		break;
 
 	case MEI_FOP_CONNECT:
@@ -1753,44 +1785,3 @@ void mei_cl_all_disconnect(struct mei_device *dev)
 	list_for_each_entry(cl, &dev->file_list, link)
 		mei_cl_set_disconnected(cl);
 }
-
-
-/**
- * mei_cl_all_wakeup  - wake up all readers and writers they can be interrupted
- *
- * @dev: mei device
- */
-void mei_cl_all_wakeup(struct mei_device *dev)
-{
-	struct mei_cl *cl;
-
-	list_for_each_entry(cl, &dev->file_list, link) {
-		if (waitqueue_active(&cl->rx_wait)) {
-			cl_dbg(dev, cl, "Waking up reading client!\n");
-			wake_up_interruptible(&cl->rx_wait);
-		}
-		if (waitqueue_active(&cl->tx_wait)) {
-			cl_dbg(dev, cl, "Waking up writing client!\n");
-			wake_up_interruptible(&cl->tx_wait);
-		}
-
-		/* synchronized under device mutex */
-		if (waitqueue_active(&cl->ev_wait)) {
-			cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
-			wake_up_interruptible(&cl->ev_wait);
-		}
-	}
-}
-
-/**
- * mei_cl_all_write_clear - clear all pending writes
- *
- * @dev: mei device
- */
-void mei_cl_all_write_clear(struct mei_device *dev)
-{
-	mei_io_list_free(&dev->write_list, NULL);
-	mei_io_list_free(&dev->write_waiting_list, NULL);
-}
-
-
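
Editorial note on the client.c changes as a whole: read completion and event notification now give an in-kernel bus consumer the first chance and only wake the character-device waiter when nothing on the bus claimed the data. A self-contained sketch of that routing, where demo_bus_rx_event() merely stands in for mei_cl_bus_rx_event():

#include <linux/types.h>
#include <linux/wait.h>

struct demo_rd_cl {
	wait_queue_head_t rx_wait;	/* char-dev reader waits here */
	bool has_bus_consumer;		/* a kernel bus client registered an rx callback */
};

/* stand-in for mei_cl_bus_rx_event(): true when a bus client took the data */
static bool demo_bus_rx_event(struct demo_rd_cl *cl)
{
	return cl->has_bus_consumer;
}

static void demo_complete_read(struct demo_rd_cl *cl)
{
	/* prefer the in-kernel consumer; wake the reader only as a fallback */
	if (!demo_bus_rx_event(cl))
		wake_up_interruptible(&cl->rx_wait);
}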

drivers/misc/mei/client.h (+13, -14)

@@ -18,7 +18,6 @@
 #define _MEI_CLIENT_H_
 
 #include <linux/types.h>
-#include <linux/watchdog.h>
 #include <linux/poll.h>
 #include <linux/mei.h>
 
@@ -84,7 +83,7 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl)
  * MEI IO Functions
  */
 struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
-				 struct file *fp);
+				 const struct file *fp);
 void mei_io_cb_free(struct mei_cl_cb *priv_cb);
 int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length);
 
@@ -108,21 +107,19 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev);
 void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
 
 
-int mei_cl_link(struct mei_cl *cl, int id);
+int mei_cl_link(struct mei_cl *cl);
 int mei_cl_unlink(struct mei_cl *cl);
 
-struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id);
+struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev);
 
 struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl,
 				 const struct file *fp);
 void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp);
 struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
-				  enum mei_cb_file_ops type, struct file *fp);
+				  enum mei_cb_file_ops type,
+				  const struct file *fp);
 int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp);
 
-int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
-
-int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
 /*
  *  MEI input output function prototype
  */
@@ -217,10 +214,10 @@ void mei_cl_set_disconnected(struct mei_cl *cl);
 int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
 			  struct mei_cl_cb *cmpl_list);
 int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
-		   struct file *file);
+		   const struct file *file);
 int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
 			      struct mei_cl_cb *cmpl_list);
-int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp);
+int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp);
 int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
 			struct mei_cl_cb *cmpl_list);
 int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking);
@@ -229,19 +226,18 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
 
 void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
 
-void mei_host_client_init(struct work_struct *work);
+void mei_host_client_init(struct mei_device *dev);
 
 u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop);
 enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request);
-int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request);
+int mei_cl_notify_request(struct mei_cl *cl,
+			  const struct file *file, u8 request);
 int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
 		      struct mei_cl_cb *cmpl_list);
 int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev);
 void mei_cl_notify(struct mei_cl *cl);
 
 void mei_cl_all_disconnect(struct mei_device *dev);
-void mei_cl_all_wakeup(struct mei_device *dev);
-void mei_cl_all_write_clear(struct mei_device *dev);
 
 #define MEI_CL_FMT "cl:host=%02d me=%02d "
 #define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl)
@@ -249,6 +245,9 @@ void mei_cl_all_write_clear(struct mei_device *dev);
 #define cl_dbg(dev, cl, format, arg...) \
 	dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
 
+#define cl_warn(dev, cl, format, arg...) \
+	dev_warn((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
+
 #define cl_err(dev, cl, format, arg...) \
 	dev_err((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
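
Editorial note: client.h gains a cl_warn() variant alongside cl_dbg()/cl_err(). A sketch of how such a prefix-adding log macro family is built and used, with hypothetical demo_* names and a plain struct device pointer:

#include <linux/device.h>

struct demo_cl_ids {
	int host_id;
	int me_id;
};

#define DEMO_CL_FMT "cl:host=%02d me=%02d "
#define DEMO_CL_PRM(cl) (cl)->host_id, (cl)->me_id

/* same shape as cl_dbg/cl_warn/cl_err: fixed client-id prefix + printf args */
#define demo_cl_warn(dev, cl, fmt, arg...) \
	dev_warn(dev, DEMO_CL_FMT fmt, DEMO_CL_PRM(cl), ##arg)

/* usage, as in mei_hbm_fw_disconnect_req():
 *	demo_cl_warn(dev, cl, "fw disconnect request received\n");
 */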
 

drivers/misc/mei/debugfs.c (+55, -10)

@@ -50,6 +50,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
 	}
 
 	pos += scnprintf(buf + pos, bufsz - pos, HDR);
+#undef HDR
 
 	/*  if the driver is not enabled the list won't be consistent */
 	if (dev->dev_state != MEI_DEV_ENABLED)
@@ -90,23 +91,37 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
 {
 	struct mei_device *dev = fp->private_data;
 	struct mei_cl *cl;
-	const size_t bufsz = 1024;
+	size_t bufsz = 1;
 	char *buf;
 	int i = 0;
 	int pos = 0;
 	int ret;
 
+#define HDR "   |me|host|state|rd|wr|\n"
+
 	if (!dev)
 		return -ENODEV;
 
+	mutex_lock(&dev->device_lock);
+
+	/*
+	 * if the driver is not enabled the list won't be consistent,
+	 * we output empty table
+	 */
+	if (dev->dev_state == MEI_DEV_ENABLED)
+		list_for_each_entry(cl, &dev->file_list, link)
+			bufsz++;
+
+	bufsz *= sizeof(HDR) + 1;
+
 	buf = kzalloc(bufsz, GFP_KERNEL);
-	if  (!buf)
+	if  (!buf) {
+		mutex_unlock(&dev->device_lock);
 		return -ENOMEM;
+	}
 
-	pos += scnprintf(buf + pos, bufsz - pos,
-			"  |me|host|state|rd|wr|\n");
-
-	mutex_lock(&dev->device_lock);
+	pos += scnprintf(buf + pos, bufsz - pos, HDR);
+#undef HDR
 
 	/*  if the driver is not enabled the list won't be consistent */
 	if (dev->dev_state != MEI_DEV_ENABLED)
@@ -115,7 +130,7 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
 	list_for_each_entry(cl, &dev->file_list, link) {
 
 		pos += scnprintf(buf + pos, bufsz - pos,
-			"%2d|%2d|%4d|%5d|%2d|%2d|\n",
+			"%3d|%2d|%4d|%5d|%2d|%2d|\n",
 			i, mei_cl_me_id(cl), cl->host_client_id, cl->state,
 			!list_empty(&cl->rd_completed), cl->writing_state);
 		i++;
@@ -150,16 +165,21 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
 	pos += scnprintf(buf + pos, bufsz - pos, "hbm: %s\n",
 			mei_hbm_state_str(dev->hbm_state));
 
-	if (dev->hbm_state == MEI_HBM_STARTED) {
+	if (dev->hbm_state >= MEI_HBM_ENUM_CLIENTS &&
+	    dev->hbm_state <= MEI_HBM_STARTED) {
 		pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n");
 		pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n",
 				 dev->hbm_f_pg_supported);
 		pos += scnprintf(buf + pos, bufsz - pos, "\tDC: %01d\n",
 				 dev->hbm_f_dc_supported);
+		pos += scnprintf(buf + pos, bufsz - pos, "\tIE: %01d\n",
+				 dev->hbm_f_ie_supported);
 		pos += scnprintf(buf + pos, bufsz - pos, "\tDOT: %01d\n",
 				 dev->hbm_f_dot_supported);
 		pos += scnprintf(buf + pos, bufsz - pos, "\tEV: %01d\n",
 				 dev->hbm_f_ev_supported);
+		pos += scnprintf(buf + pos, bufsz - pos, "\tFA: %01d\n",
+				 dev->hbm_f_fa_supported);
 	}
 
 	pos += scnprintf(buf + pos, bufsz - pos, "pg:  %s, %s\n",
@@ -175,6 +195,30 @@ static const struct file_operations mei_dbgfs_fops_devstate = {
 	.llseek = generic_file_llseek,
 };
 
+static ssize_t mei_dbgfs_write_allow_fa(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct mei_device *dev;
+	int ret;
+
+	dev = container_of(file->private_data,
+			   struct mei_device, allow_fixed_address);
+
+	ret = debugfs_write_file_bool(file, user_buf, count, ppos);
+	if (ret < 0)
+		return ret;
+	dev->override_fixed_address = true;
+	return ret;
+}
+
+static const struct file_operations mei_dbgfs_fops_allow_fa = {
+	.open = simple_open,
+	.read = debugfs_read_file_bool,
+	.write = mei_dbgfs_write_allow_fa,
+	.llseek = generic_file_llseek,
+};
+
 /**
  * mei_dbgfs_deregister - Remove the debugfs files and directories
  *
@@ -224,8 +268,9 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
 		dev_err(dev->dev, "devstate: registration failed\n");
 		goto err;
 	}
-	f = debugfs_create_bool("allow_fixed_address", S_IRUSR | S_IWUSR, dir,
-				&dev->allow_fixed_address);
+	f = debugfs_create_file("allow_fixed_address", S_IRUSR | S_IWUSR, dir,
+				&dev->allow_fixed_address,
+				&mei_dbgfs_fops_allow_fa);
 	if (!f) {
 		dev_err(dev->dev, "allow_fixed_address: registration failed\n");
 		goto err;

+ 20 - 4
drivers/misc/mei/hbm.c

@@ -301,7 +301,10 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev)
 	enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
 	memset(enum_req, 0, len);
 	enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
-	enum_req->allow_add = dev->hbm_f_dc_supported;
+	enum_req->flags |= dev->hbm_f_dc_supported ?
+			   MEI_HBM_ENUM_F_ALLOW_ADD : 0;
+	enum_req->flags |= dev->hbm_f_ie_supported ?
+			   MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
 
 	ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
 	if (ret) {
@@ -401,6 +404,9 @@ static int mei_hbm_fw_add_cl_req(struct mei_device *dev,
 	if (ret)
 		status = !MEI_HBMS_SUCCESS;
 
+	if (dev->dev_state == MEI_DEV_ENABLED)
+		schedule_work(&dev->bus_rescan_work);
+
 	return mei_hbm_add_cl_resp(dev, req->me_addr, status);
 }
 
@@ -543,7 +549,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
 	/* We got all client properties */
 	if (next_client_index == MEI_CLIENTS_MAX) {
 		dev->hbm_state = MEI_HBM_STARTED;
-		schedule_work(&dev->init_work);
+		mei_host_client_init(dev);
 
 		return 0;
 	}
@@ -789,8 +795,11 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl,
 		cl->state = MEI_FILE_CONNECTED;
 	else {
 		cl->state = MEI_FILE_DISCONNECT_REPLY;
-		if (rs->status == MEI_CL_CONN_NOT_FOUND)
+		if (rs->status == MEI_CL_CONN_NOT_FOUND) {
 			mei_me_cl_del(dev, cl->me_cl);
+			if (dev->dev_state == MEI_DEV_ENABLED)
+				schedule_work(&dev->bus_rescan_work);
+		}
 	}
 	cl->status = mei_cl_conn_status_to_errno(rs->status);
 }
@@ -866,7 +875,7 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
 
 	cl = mei_hbm_cl_find_by_cmd(dev, disconnect_req);
 	if (cl) {
-		cl_dbg(dev, cl, "fw disconnect request received\n");
+		cl_warn(dev, cl, "fw disconnect request received\n");
 		cl->state = MEI_FILE_DISCONNECTING;
 		cl->timer_count = 0;
 
@@ -972,6 +981,9 @@ static void mei_hbm_config_features(struct mei_device *dev)
 	if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
 		dev->hbm_f_dc_supported = 1;
 
+	if (dev->version.major_version >= HBM_MAJOR_VERSION_IE)
+		dev->hbm_f_ie_supported = 1;
+
 	/* disconnect on connect timeout instead of link reset */
 	if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT)
 		dev->hbm_f_dot_supported = 1;
@@ -979,6 +991,10 @@ static void mei_hbm_config_features(struct mei_device *dev)
 	/* Notification Event Support */
 	if (dev->version.major_version >= HBM_MAJOR_VERSION_EV)
 		dev->hbm_f_ev_supported = 1;
+
+	/* Fixed Address Client Support */
+	if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
+		dev->hbm_f_fa_supported = 1;
 }
 
 /**
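
Editorial note: hbm.c keys each optional capability (dynamic clients, immediate enumeration, fixed-address clients, ...) to the first HBM major version that provides it and folds some of them into the enumeration request flags. A sketch of that version-gated flag setup; the DEMO_* thresholds and bit values are placeholders, not the driver's HBM_MAJOR_VERSION_* / MEI_HBM_ENUM_F_* constants:

#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_VERSION_DC		2	/* first version with dynamic clients */
#define DEMO_VERSION_IE		2	/* first version with immediate enumeration */

#define DEMO_ENUM_F_ALLOW_ADD		BIT(0)
#define DEMO_ENUM_F_IMMEDIATE_ENUM	BIT(1)

static u8 demo_enum_flags(u8 hbm_major)
{
	u8 flags = 0;

	if (hbm_major >= DEMO_VERSION_DC)
		flags |= DEMO_ENUM_F_ALLOW_ADD;
	if (hbm_major >= DEMO_VERSION_IE)
		flags |= DEMO_ENUM_F_IMMEDIATE_ENUM;

	return flags;
}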

Too many files were changed in this diff, so some files are not shown.