Merge tag 'char-misc-4.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver updates from Greg KH:
 "Here is the big char/misc driver update for 4.14-rc1.

  Lots of different stuff in here, it's been an active development cycle
  for some reason. Highlights are:

   - updated binder driver, this brings binder up to date with what
     shipped in the Android O release, plus some more changes that
     happened since then that are in the Android development trees.

   - coresight updates and fixes

   - mux driver file renames to be a bit "nicer"

   - intel_th driver updates

   - normal set of hyper-v updates and changes

   - small fpga subsystem and driver updates

   - lots of const code changes all over the driver trees

   - extcon driver updates

   - fmc driver subsystem updates

   - w1 subsystem minor reworks and new features and drivers added

   - spmi driver updates

  Plus a smattering of other minor driver updates and fixes.

  All of these have been in linux-next with no reported issues for a
  while"

* tag 'char-misc-4.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (244 commits)
  ANDROID: binder: don't queue async transactions to thread.
  ANDROID: binder: don't enqueue death notifications to thread todo.
  ANDROID: binder: Don't BUG_ON(!spin_is_locked()).
  ANDROID: binder: Add BINDER_GET_NODE_DEBUG_INFO ioctl
  ANDROID: binder: push new transactions to waiting threads.
  ANDROID: binder: remove proc waitqueue
  android: binder: Add page usage in binder stats
  android: binder: fixup crash introduced by moving buffer hdr
  drivers: w1: add hwmon temp support for w1_therm
  drivers: w1: refactor w1_slave_show to make the temp reading functionality separate
  drivers: w1: add hwmon support structures
  eeprom: idt_89hpesx: Support both ACPI and OF probing
  mcb: Fix an error handling path in 'chameleon_parse_cells()'
  MCB: add support for SC31 to mcb-lpc
  mux: make device_type const
  char: virtio: constify attribute_group structures.
  Documentation/ABI: document the nvmem sysfs files
  lkdtm: fix spelling mistake: "incremeted" -> "incremented"
  perf: cs-etm: Fix ETMv4 CONFIGR entry in perf.data file
  nvmem: include linux/err.h from header
  ...
Linus Torvalds (8 years ago), commit bafb0762cb
100 changed files with 7320 additions and 2180 deletions
  1. Documentation/ABI/stable/sysfs-bus-nvmem (+19, -0)
  2. Documentation/ABI/testing/sysfs-bus-thunderbolt (+2, -0)
  3. Documentation/ABI/testing/sysfs-driver-altera-cvp (+8, -0)
  4. Documentation/admin-guide/devices.txt (+5, -0)
  5. Documentation/devicetree/bindings/arm/coresight.txt (+2, -2)
  6. Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.txt (+24, -0)
  7. Documentation/devicetree/bindings/fpga/altera-passive-serial.txt (+29, -0)
  8. Documentation/devicetree/bindings/fpga/xilinx-pr-decoupler.txt (+36, -0)
  9. Documentation/devicetree/bindings/vendor-prefixes.txt (+1, -0)
  10. Documentation/devicetree/bindings/xilinx.txt (+2, -0)
  11. Documentation/trace/stm.txt (+12, -1)
  12. MAINTAINERS (+3, -1)
  13. arch/arm/boot/dts/imx6q-evi.dts (+16, -0)
  14. arch/x86/include/asm/mshyperv.h (+2, -0)
  15. arch/x86/kernel/cpu/mshyperv.c (+9, -3)
  16. block/genhd.c (+13, -5)
  17. drivers/android/Kconfig (+12, -2)
  18. drivers/android/Makefile (+2, -1)
  19. drivers/android/binder.c (+2556, -1312)
  20. drivers/android/binder_alloc.c (+1009, -0)
  21. drivers/android/binder_alloc.h (+187, -0)
  22. drivers/android/binder_alloc_selftest.c (+310, -0)
  23. drivers/android/binder_trace.h (+77, -19)
  24. drivers/auxdisplay/panel.c (+3, -3)
  25. drivers/char/applicom.c (+1, -1)
  26. drivers/char/mwave/smapi.c (+25, -23)
  27. drivers/char/ppdev.c (+0, -3)
  28. drivers/char/tlclk.c (+1, -1)
  29. drivers/char/virtio_console.c (+1, -1)
  30. drivers/char/xilinx_hwicap/xilinx_hwicap.c (+21, -18)
  31. drivers/char/xilinx_hwicap/xilinx_hwicap.h (+9, -4)
  32. drivers/extcon/Kconfig (+7, -0)
  33. drivers/extcon/Makefile (+1, -0)
  34. drivers/extcon/devres.c (+24, -26)
  35. drivers/extcon/extcon-intel-int3496.c (+1, -1)
  36. drivers/extcon/extcon-max77693.c (+2, -3)
  37. drivers/extcon/extcon-usbc-cros-ec.c (+417, -0)
  38. drivers/extcon/extcon.c (+136, -143)
  39. drivers/firmware/google/vpd.c (+5, -5)
  40. drivers/fmc/Makefile (+1, -0)
  41. drivers/fmc/fmc-chardev.c (+1, -2)
  42. drivers/fmc/fmc-core.c (+90, -5)
  43. drivers/fmc/fmc-debug.c (+173, -0)
  44. drivers/fmc/fmc-dump.c (+0, -41)
  45. drivers/fmc/fmc-match.c (+1, -1)
  46. drivers/fmc/fmc-private.h (+9, -0)
  47. drivers/fmc/fmc-sdb.c (+25, -94)
  48. drivers/fmc/fmc-trivial.c (+8, -12)
  49. drivers/fmc/fmc-write-eeprom.c (+4, -4)
  50. drivers/fmc/fru-parse.c (+1, -2)
  51. drivers/fpga/Kconfig (+15, -5)
  52. drivers/fpga/Makefile (+2, -0)
  53. drivers/fpga/altera-cvp.c (+500, -0)
  54. drivers/fpga/altera-hps2fpga.c (+8, -4)
  55. drivers/fpga/altera-ps-spi.c (+308, -0)
  56. drivers/fpga/fpga-region.c (+2, -2)
  57. drivers/fsi/fsi-core.c (+2, -2)
  58. drivers/fsi/fsi-scom.c (+4, -6)
  59. drivers/hv/channel.c (+14, -0)
  60. drivers/hv/channel_mgmt.c (+26, -3)
  61. drivers/hv/hv_balloon.c (+6, -6)
  62. drivers/hv/hv_kvp.c (+1, -1)
  63. drivers/hv/ring_buffer.c (+60, -109)
  64. drivers/hv/vmbus_drv.c (+3, -0)
  65. drivers/hwtracing/coresight/Kconfig (+5, -5)
  66. drivers/hwtracing/coresight/Makefile (+1, -1)
  67. drivers/hwtracing/coresight/coresight-cpu-debug.c (+1, -1)
  68. drivers/hwtracing/coresight/coresight-dynamic-replicator.c (+30, -4)
  69. drivers/hwtracing/coresight/coresight-etb10.c (+43, -25)
  70. drivers/hwtracing/coresight/coresight-etm-perf.c (+3, -1)
  71. drivers/hwtracing/coresight/coresight-etm.h (+1, -0)
  72. drivers/hwtracing/coresight/coresight-etm3x-sysfs.c (+13, -13)
  73. drivers/hwtracing/coresight/coresight-etm3x.c (+18, -4)
  74. drivers/hwtracing/coresight/coresight-etm4x-sysfs.c (+12, -12)
  75. drivers/hwtracing/coresight/coresight-etm4x.c (+5, -1)
  76. drivers/hwtracing/coresight/coresight-funnel.c (+6, -1)
  77. drivers/hwtracing/coresight/coresight-priv.h (+34, -5)
  78. drivers/hwtracing/coresight/coresight-stm.c (+25, -24)
  79. drivers/hwtracing/coresight/coresight-tmc-etf.c (+35, -7)
  80. drivers/hwtracing/coresight/coresight-tmc-etr.c (+37, -12)
  81. drivers/hwtracing/coresight/coresight-tmc.c (+85, -23)
  82. drivers/hwtracing/coresight/coresight-tmc.h (+84, -1)
  83. drivers/hwtracing/coresight/coresight-tpiu.c (+6, -1)
  84. drivers/hwtracing/coresight/coresight.c (+8, -0)
  85. drivers/hwtracing/intel_th/core.c (+252, -107)
  86. drivers/hwtracing/intel_th/gth.c (+30, -10)
  87. drivers/hwtracing/intel_th/gth.h (+5, -0)
  88. drivers/hwtracing/intel_th/intel_th.h (+84, -20)
  89. drivers/hwtracing/intel_th/msu.c (+6, -6)
  90. drivers/hwtracing/intel_th/pci.c (+65, -2)
  91. drivers/hwtracing/intel_th/pti.c (+110, -5)
  92. drivers/hwtracing/intel_th/pti.h (+8, -0)
  93. drivers/hwtracing/stm/core.c (+1, -1)
  94. drivers/mcb/mcb-lpc.c (+15, -0)
  95. drivers/mcb/mcb-parse.c (+4, -2)
  96. drivers/misc/Makefile (+1, -0)
  97. drivers/misc/apds9802als.c (+2, -2)
  98. drivers/misc/apds990x.c (+1, -1)
  99. drivers/misc/aspeed-lpc-snoop.c (+29, -5)
  100. drivers/misc/bh1770glc.c (+1, -1)

+ 19 - 0
Documentation/ABI/stable/sysfs-bus-nvmem

@@ -0,0 +1,19 @@
+What:		/sys/bus/nvmem/devices/.../nvmem
+Date:		July 2015
+KernelVersion:  4.2
+Contact:	Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Description:
+		This file allows user to read/write the raw NVMEM contents.
+		Permissions for write to this file depends on the nvmem
+		provider configuration.
+
+		ex:
+		hexdump /sys/bus/nvmem/devices/qfprom0/nvmem
+
+		0000000 0000 0000 0000 0000 0000 0000 0000 0000
+		*
+		00000a0 db10 2240 0000 e000 0c00 0c00 0000 0c00
+		0000000 0000 0000 0000 0000 0000 0000 0000 0000
+		...
+		*
+		0001000

+ 2 - 0
Documentation/ABI/testing/sysfs-bus-thunderbolt

@@ -45,6 +45,8 @@ Contact:	thunderbolt-software@lists.01.org
 Description:	When a devices supports Thunderbolt secure connect it will
 		have this attribute. Writing 32 byte hex string changes
 		authorization to use the secure connection method instead.
+		Writing an empty string clears the key and regular connection
+		method can be used again.
 
 What:		/sys/bus/thunderbolt/devices/.../device
 Date:		Sep 2017

+ 8 - 0
Documentation/ABI/testing/sysfs-driver-altera-cvp

@@ -0,0 +1,8 @@
+What:		/sys/bus/pci/drivers/altera-cvp/chkcfg
+Date:		May 2017
+Kernel Version:	4.13
+Contact:	Anatolij Gustschin <agust@denx.de>
+Description:
+		Contains either 1 or 0 and controls if configuration
+		error checking in altera-cvp driver is turned on or
+		off.
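
As a usage note (not part of the commit): toggling this attribute from userspace is a one-byte write. A minimal sketch, assuming the attribute path documented above; the helper name is hypothetical:

#include <fcntl.h>
#include <unistd.h>

/* Hypothetical helper: enable/disable altera-cvp configuration
 * error checking by writing "1" or "0" to the chkcfg attribute. */
static int set_cvp_chkcfg(int on)
{
	int fd = open("/sys/bus/pci/drivers/altera-cvp/chkcfg", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, on ? "1" : "0", 1);
	close(fd);
	return n == 1 ? 0 : -1;
}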

+ 5 - 0
Documentation/admin-guide/devices.txt

@@ -3081,3 +3081,8 @@
 		  1 = /dev/osd1		Second OSD Device
 		  ...
 		  255 = /dev/osd255	256th OSD Device
+
+ 384-511 char	RESERVED FOR DYNAMIC ASSIGNMENT
+		Character devices that request a dynamic allocation of major
+		number will take numbers starting from 511 and downward,
+		once the 234-254 range is full.
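
For context (not part of the commit): the dynamic allocation described above is what a driver opts into when it lets the kernel pick its char major. A minimal sketch with a hypothetical device name:

#include <linux/fs.h>
#include <linux/module.h>

static dev_t devt;

static int __init mydrv_init(void)
{
	/* Request one dynamically assigned major:minor pair; with this
	 * change the major may come from 384-511 once 234-254 is full. */
	int ret = alloc_chrdev_region(&devt, 0, 1, "mydrv");

	if (ret)
		return ret;
	pr_info("mydrv: got major %d\n", MAJOR(devt));
	return 0;
}

static void __exit mydrv_exit(void)
{
	unregister_chrdev_region(devt, 1);
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");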

+ 2 - 2
Documentation/devicetree/bindings/arm/coresight.txt

@@ -34,8 +34,8 @@ its hardware characteristcs.
 		- Embedded Trace Macrocell (version 4.x):
 			"arm,coresight-etm4x", "arm,primecell";
 
-		- Qualcomm Configurable Replicator (version 1.x):
-			"qcom,coresight-replicator1x", "arm,primecell";
+		- Coresight programmable Replicator :
+			"arm,coresight-dynamic-replicator", "arm,primecell";
 
 		- System Trace Macrocell:
 			"arm,coresight-stm", "arm,primecell"; [1]

+ 24 - 0
Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.txt

@@ -0,0 +1,24 @@
+ChromeOS EC USB Type-C cable and accessories detection
+
+On ChromeOS systems with USB Type C ports, the ChromeOS Embedded Controller is
+able to detect the state of external accessories such as display adapters
+or USB devices when said accessories are attached or detached.
+
+The node for this device must be under a cros-ec node like google,cros-ec-spi
+or google,cros-ec-i2c.
+
+Required properties:
+- compatible:		Should be "google,extcon-usbc-cros-ec".
+- google,usb-port-id:	Specifies the USB port ID to use.
+
+Example:
+	cros-ec@0 {
+		compatible = "google,cros-ec-i2c";
+
+		...
+
+		extcon {
+			compatible = "google,extcon-usbc-cros-ec";
+			google,usb-port-id = <0>;
+		};
+	}

+ 29 - 0
Documentation/devicetree/bindings/fpga/altera-passive-serial.txt

@@ -0,0 +1,29 @@
+Altera Passive Serial SPI FPGA Manager
+
+Altera FPGAs support a method of loading the bitstream over what is
+referred to as "passive serial".
+The passive serial link is not technically SPI, and might require extra
+circuits in order to play nicely with other SPI slaves on the same bus.
+
+See https://www.altera.com/literature/hb/cyc/cyc_c51013.pdf
+
+Required properties:
+- compatible: Must be one of the following:
+	"altr,fpga-passive-serial",
+	"altr,fpga-arria10-passive-serial"
+- reg: SPI chip select of the FPGA
+- nconfig-gpios: config pin (referred to as nCONFIG in the manual)
+- nstat-gpios: status pin (referred to as nSTATUS in the manual)
+
+Optional properties:
+- confd-gpios: confd pin (referred to as CONF_DONE in the manual)
+
+Example:
+	fpga: fpga@0 {
+		compatible = "altr,fpga-passive-serial";
+		spi-max-frequency = <20000000>;
+		reg = <0>;
+		nconfig-gpios = <&gpio4 9 GPIO_ACTIVE_LOW>;
+		nstat-gpios = <&gpio4 11 GPIO_ACTIVE_LOW>;
+		confd-gpios = <&gpio4 12 GPIO_ACTIVE_LOW>;
+	};

+ 36 - 0
Documentation/devicetree/bindings/fpga/xilinx-pr-decoupler.txt

@@ -0,0 +1,36 @@
+Xilinx LogiCORE Partial Reconfig Decoupler Softcore
+
+The Xilinx LogiCORE Partial Reconfig Decoupler manages one or more
+decouplers / fpga bridges.
+The controller can decouple/disable the bridges which prevents signal
+changes from passing through the bridge.  The controller can also
+couple / enable the bridges which allows traffic to pass through the
+bridge normally.
+
+The Driver supports only MMIO handling. A PR region can have multiple
+PR Decouplers which can be handled independently or chained via decouple/
+decouple_status signals.
+
+Required properties:
+- compatible		: Should contain "xlnx,pr-decoupler-1.00" followed by
+                          "xlnx,pr-decoupler"
+- regs			: base address and size for decoupler module
+- clocks		: input clock to IP
+- clock-names		: should contain "aclk"
+
+Optional properties:
+- bridge-enable		: 0 if driver should disable bridge at startup
+			  1 if driver should enable bridge at startup
+			  Default is to leave bridge in current state.
+
+See Documentation/devicetree/bindings/fpga/fpga-region.txt for generic bindings.
+
+Example:
+	fpga-bridge@100000450 {
+		compatible = "xlnx,pr-decoupler-1.00",
+			     "xlnx-pr-decoupler";
+		regs = <0x10000045 0x10>;
+		clocks = <&clkc 15>;
+		clock-names = "aclk";
+		bridge-enable = <0>;
+	};

+ 1 - 0
Documentation/devicetree/bindings/vendor-prefixes.txt

@@ -175,6 +175,7 @@ kosagi	Sutajio Ko-Usagi PTE Ltd.
 kyo	Kyocera Corporation
 lacie	LaCie
 lantiq	Lantiq Semiconductor
+lattice	Lattice Semiconductor
 lego	LEGO Systems A/S
 lenovo	Lenovo Group Ltd.
 lg	LG Corporation

+ 2 - 0
Documentation/devicetree/bindings/xilinx.txt

@@ -281,6 +281,8 @@
                       capabilities of the underlying ICAP hardware
                       differ between different families.  May be
                       'virtex2p', 'virtex4', or 'virtex5'.
+		- compatible : should contain "xlnx,xps-hwicap-1.00.a" or
+				"xlnx,opb-hwicap-1.00.b".
 
       vi) Xilinx Uart 16550
 

+ 12 - 1
Documentation/trace/stm.txt

@@ -83,7 +83,7 @@ by writing the name of the desired stm device there, for example:
 $ echo dummy_stm.0 > /sys/class/stm_source/console/stm_source_link
 
 For examples on how to use stm_source interface in the kernel, refer
-to stm_console or stm_heartbeat drivers.
+to stm_console, stm_heartbeat or stm_ftrace drivers.
 
 Each stm_source device will need to assume a master and a range of
 channels, depending on how many channels it requires. These are
@@ -107,5 +107,16 @@ console in the STP stream, create a "console" policy entry (see the
 beginning of this text on how to do that). When initialized, it will
 consume one channel.
 
+stm_ftrace
+==========
+
+This is another "stm_source" device, once the stm_ftrace has been
+linked with an stm device, and if "function" tracer is enabled,
+function address and parent function address which Ftrace subsystem
+would store into ring buffer will be exported via the stm device at
+the same time.
+
+Currently only Ftrace "function" tracer is supported.
+
 [1] https://software.intel.com/sites/default/files/managed/d3/3c/intel-th-developer-manual.pdf
 [2] http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0444b/index.html

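Since the text above points at stm_console, stm_heartbeat and stm_ftrace as in-kernel examples of the stm_source interface, here is a minimal sketch of such a source (hypothetical name, no payload logic), assuming the stm_source API declared in include/linux/stm.h:

#include <linux/module.h>
#include <linux/stm.h>

static int my_src_link(struct stm_source_data *data)
{
	return 0;	/* nothing to set up when linked to an stm device */
}

static void my_src_unlink(struct stm_source_data *data)
{
	/* nothing to tear down */
}

static struct stm_source_data my_src = {
	.name		= "my_source",
	.nr_chans	= 1,	/* consumes one channel, like the console */
	.link		= my_src_link,
	.unlink		= my_src_unlink,
};

static int __init my_src_init(void)
{
	return stm_source_register_device(NULL, &my_src);
}

static void __exit my_src_exit(void)
{
	stm_source_unregister_device(&my_src);
}

module_init(my_src_init);
module_exit(my_src_exit);
MODULE_LICENSE("GPL");

Once linked via sysfs (echo dummy_stm.0 > /sys/class/stm_source/my_source/stm_source_link), data passed to stm_source_write(&my_src, 0, buf, count) should flow out through the stm device.
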
+ 3 - 1
MAINTAINERS

@@ -5362,10 +5362,11 @@ K:	fmc_d.*register
 
 FPGA MANAGER FRAMEWORK
 M:	Alan Tull <atull@kernel.org>
-R:	Moritz Fischer <moritz.fischer@ettus.com>
+R:	Moritz Fischer <mdf@kernel.org>
 L:	linux-fpga@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
+Q:	http://patchwork.kernel.org/project/linux-fpga/list/
 F:	Documentation/fpga/
 F:	Documentation/devicetree/bindings/fpga/
 F:	drivers/fpga/
@@ -9484,6 +9485,7 @@ M:	Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 S:	Maintained
 F:	drivers/nvmem/
 F:	Documentation/devicetree/bindings/nvmem/
+F:	Documentation/ABI/stable/sysfs-bus-nvmem
 F:	include/linux/nvmem-consumer.h
 F:	include/linux/nvmem-provider.h
 

+ 16 - 0
arch/arm/boot/dts/imx6q-evi.dts

@@ -94,6 +94,15 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_ecspi1 &pinctrl_ecspi1cs>;
 	status = "okay";
+
+	fpga: fpga@0 {
+		compatible = "altr,fpga-passive-serial";
+		spi-max-frequency = <20000000>;
+		reg = <0>;
+		pinctrl-0 = <&pinctrl_fpgaspi>;
+		nconfig-gpios = <&gpio4 9 GPIO_ACTIVE_LOW>;
+		nstat-gpios = <&gpio4 11 GPIO_ACTIVE_LOW>;
+	};
 };
 
 &ecspi3 {
@@ -319,6 +328,13 @@
 		>;
 	};
 
+	pinctrl_fpgaspi: fpgaspigrp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_ROW1__GPIO4_IO09 0x1b0b0
+			MX6QDL_PAD_KEY_ROW2__GPIO4_IO11 0x1b0b0
+		>;
+	};
+
 	pinctrl_gpminand: gpminandgrp {
 		fsl,pins = <
 			MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1

+ 2 - 0
arch/x86/include/asm/mshyperv.h

@@ -28,6 +28,8 @@ struct ms_hyperv_info {
 	u32 features;
 	u32 misc_features;
 	u32 hints;
+	u32 max_vp_index;
+	u32 max_lp_index;
 };
 
 extern struct ms_hyperv_info ms_hyperv;

+ 9 - 3
arch/x86/kernel/cpu/mshyperv.c

@@ -179,9 +179,15 @@ static void __init ms_hyperv_init_platform(void)
 	ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
 	ms_hyperv.hints    = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
 
-	pr_info("HyperV: features 0x%x, hints 0x%x\n",
+	pr_info("Hyper-V: features 0x%x, hints 0x%x\n",
 		ms_hyperv.features, ms_hyperv.hints);
 
+	ms_hyperv.max_vp_index = cpuid_eax(HVCPUID_IMPLEMENTATION_LIMITS);
+	ms_hyperv.max_lp_index = cpuid_ebx(HVCPUID_IMPLEMENTATION_LIMITS);
+
+	pr_debug("Hyper-V: max %u virtual processors, %u logical processors\n",
+		 ms_hyperv.max_vp_index, ms_hyperv.max_lp_index);
+
 	/*
 	 * Extract host information.
 	 */
@@ -214,7 +220,7 @@ static void __init ms_hyperv_init_platform(void)
 		rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
 		hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
 		lapic_timer_frequency = hv_lapic_frequency;
-		pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
+		pr_info("Hyper-V: LAPIC Timer Frequency: %#x\n",
 			lapic_timer_frequency);
 	}
 
@@ -248,7 +254,7 @@ static void __init ms_hyperv_init_platform(void)
 }
 
 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
-	.name			= "Microsoft HyperV",
+	.name			= "Microsoft Hyper-V",
 	.detect			= ms_hyperv_platform,
 	.init_platform		= ms_hyperv_init_platform,
 };

+ 13 - 5
block/genhd.c

@@ -242,6 +242,7 @@ EXPORT_SYMBOL_GPL(disk_map_sector_rcu);
  * Can be deleted altogether. Later.
  *
  */
+#define BLKDEV_MAJOR_HASH_SIZE 255
 static struct blk_major_name {
 	struct blk_major_name *next;
 	int major;
@@ -259,12 +260,11 @@ void blkdev_show(struct seq_file *seqf, off_t offset)
 {
 	struct blk_major_name *dp;
 
-	if (offset < BLKDEV_MAJOR_HASH_SIZE) {
-		mutex_lock(&block_class_lock);
-		for (dp = major_names[offset]; dp; dp = dp->next)
+	mutex_lock(&block_class_lock);
+	for (dp = major_names[major_to_index(offset)]; dp; dp = dp->next)
+		if (dp->major == offset)
 			seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
-		mutex_unlock(&block_class_lock);
-	}
+	mutex_unlock(&block_class_lock);
 }
 #endif /* CONFIG_PROC_FS */
 
@@ -309,6 +309,14 @@ int register_blkdev(unsigned int major, const char *name)
 		ret = major;
 	}
 
+	if (major >= BLKDEV_MAJOR_MAX) {
+		pr_err("register_blkdev: major requested (%d) is greater than the maximum (%d) for %s\n",
+		       major, BLKDEV_MAJOR_MAX, name);
+
+		ret = -EINVAL;
+		goto out;
+	}
+
 	p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
 	if (p == NULL) {
 		ret = -ENOMEM;

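For callers, the new range check above means register_blkdev() can now fail with -EINVAL for an out-of-range major. A minimal sketch of the usual calling pattern (hypothetical driver name):

#include <linux/fs.h>
#include <linux/module.h>

static int myblk_major;

static int __init myblk_init(void)
{
	/* Passing 0 asks for a dynamically assigned major; an explicit
	 * major >= BLKDEV_MAJOR_MAX is now rejected with -EINVAL
	 * instead of being silently mis-hashed. */
	myblk_major = register_blkdev(0, "myblk");
	if (myblk_major < 0)
		return myblk_major;
	return 0;
}

static void __exit myblk_exit(void)
{
	unregister_blkdev(myblk_major, "myblk");
}

module_init(myblk_init);
module_exit(myblk_exit);
MODULE_LICENSE("GPL");
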
+ 12 - 2
drivers/android/Kconfig

@@ -22,7 +22,7 @@ config ANDROID_BINDER_IPC
 config ANDROID_BINDER_DEVICES
 	string "Android Binder devices"
 	depends on ANDROID_BINDER_IPC
-	default "binder,hwbinder"
+	default "binder,hwbinder,vndbinder"
 	---help---
 	  Default value for the binder.devices parameter.
 
@@ -32,7 +32,7 @@ config ANDROID_BINDER_DEVICES
 	  therefore logically separated from the other devices.
 
 config ANDROID_BINDER_IPC_32BIT
-	bool
+	bool "Use old (Android 4.4 and earlier) 32-bit binder API"
 	depends on !64BIT && ANDROID_BINDER_IPC
 	default y
 	---help---
@@ -44,6 +44,16 @@ config ANDROID_BINDER_IPC_32BIT
 
 	  Note that enabling this will break newer Android user-space.
 
+config ANDROID_BINDER_IPC_SELFTEST
+	bool "Android Binder IPC Driver Selftest"
+	depends on ANDROID_BINDER_IPC
+	---help---
+	  This feature allows binder selftest to run.
+
+	  Binder selftest checks the allocation and free of binder buffers
+	  exhaustively with combinations of various buffer sizes and
+	  alignments.
+
 endif # if ANDROID
 
 endmenu

+ 2 - 1
drivers/android/Makefile

@@ -1,3 +1,4 @@
 ccflags-y += -I$(src)			# needed for trace events
 
-obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o
+obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o

+ 2556 - 1312
drivers/android/binder.c

@@ -15,6 +15,40 @@
  *
  */
 
+/*
+ * Locking overview
+ *
+ * There are 3 main spinlocks which must be acquired in the
+ * order shown:
+ *
+ * 1) proc->outer_lock : protects binder_ref
+ *    binder_proc_lock() and binder_proc_unlock() are
+ *    used to acq/rel.
+ * 2) node->lock : protects most fields of binder_node.
+ *    binder_node_lock() and binder_node_unlock() are
+ *    used to acq/rel
+ * 3) proc->inner_lock : protects the thread and node lists
+ *    (proc->threads, proc->waiting_threads, proc->nodes)
+ *    and all todo lists associated with the binder_proc
+ *    (proc->todo, thread->todo, proc->delivered_death and
+ *    node->async_todo), as well as thread->transaction_stack
+ *    binder_inner_proc_lock() and binder_inner_proc_unlock()
+ *    are used to acq/rel
+ *
+ * Any lock under procA must never be nested under any lock at the same
+ * level or below on procB.
+ *
+ * Functions that require a lock held on entry indicate which lock
+ * in the suffix of the function name:
+ *
+ * foo_olocked() : requires node->outer_lock
+ * foo_nlocked() : requires node->lock
+ * foo_ilocked() : requires proc->inner_lock
+ * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
+ * foo_nilocked(): requires node->lock and proc->inner_lock
+ * ...
+ */
+
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <asm/cacheflush.h>
@@ -24,7 +58,6 @@
 #include <linux/fs.h>
 #include <linux/list.h>
 #include <linux/miscdevice.h>
-#include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/nsproxy.h>
@@ -35,30 +68,31 @@
 #include <linux/sched/mm.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
 #include <linux/pid_namespace.h>
 #include <linux/security.h>
+#include <linux/spinlock.h>
 
 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
 #define BINDER_IPC_32BIT 1
 #endif
 
 #include <uapi/linux/android/binder.h>
+#include "binder_alloc.h"
 #include "binder_trace.h"
 
-static DEFINE_MUTEX(binder_main_lock);
+static HLIST_HEAD(binder_deferred_list);
 static DEFINE_MUTEX(binder_deferred_lock);
-static DEFINE_MUTEX(binder_mmap_lock);
 
 static HLIST_HEAD(binder_devices);
 static HLIST_HEAD(binder_procs);
-static HLIST_HEAD(binder_deferred_list);
+static DEFINE_MUTEX(binder_procs_lock);
+
 static HLIST_HEAD(binder_dead_nodes);
+static DEFINE_SPINLOCK(binder_dead_nodes_lock);
 
 static struct dentry *binder_debugfs_dir_entry_root;
 static struct dentry *binder_debugfs_dir_entry_proc;
-static int binder_last_id;
+static atomic_t binder_last_id;
 
 #define BINDER_DEBUG_ENTRY(name) \
 static int binder_##name##_open(struct inode *inode, struct file *file) \
@@ -88,8 +122,6 @@ BINDER_DEBUG_ENTRY(proc);
 
 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
 
-#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
-
 enum {
 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
@@ -104,17 +136,13 @@ enum {
 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
-	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
-	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
-	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
+	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
+	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
 };
 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
 
-static bool binder_debug_no_lock;
-module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
-
 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
 module_param_named(devices, binder_devices_param, charp, 0444);
 
@@ -171,26 +199,27 @@ enum binder_stat_types {
 };
 
 struct binder_stats {
-	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
-	int bc[_IOC_NR(BC_REPLY_SG) + 1];
-	int obj_created[BINDER_STAT_COUNT];
-	int obj_deleted[BINDER_STAT_COUNT];
+	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
+	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
+	atomic_t obj_created[BINDER_STAT_COUNT];
+	atomic_t obj_deleted[BINDER_STAT_COUNT];
 };
 
 static struct binder_stats binder_stats;
 
 static inline void binder_stats_deleted(enum binder_stat_types type)
 {
-	binder_stats.obj_deleted[type]++;
+	atomic_inc(&binder_stats.obj_deleted[type]);
 }
 
 static inline void binder_stats_created(enum binder_stat_types type)
 {
-	binder_stats.obj_created[type]++;
+	atomic_inc(&binder_stats.obj_created[type]);
 }
 
 struct binder_transaction_log_entry {
 	int debug_id;
+	int debug_id_done;
 	int call_type;
 	int from_proc;
 	int from_thread;
@@ -200,11 +229,14 @@ struct binder_transaction_log_entry {
 	int to_node;
 	int data_size;
 	int offsets_size;
+	int return_error_line;
+	uint32_t return_error;
+	uint32_t return_error_param;
 	const char *context_name;
 };
 struct binder_transaction_log {
-	int next;
-	int full;
+	atomic_t cur;
+	bool full;
 	struct binder_transaction_log_entry entry[32];
 };
 static struct binder_transaction_log binder_transaction_log;
@@ -214,19 +246,26 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
 	struct binder_transaction_log *log)
 {
 	struct binder_transaction_log_entry *e;
+	unsigned int cur = atomic_inc_return(&log->cur);
 
-	e = &log->entry[log->next];
-	memset(e, 0, sizeof(*e));
-	log->next++;
-	if (log->next == ARRAY_SIZE(log->entry)) {
-		log->next = 0;
+	if (cur >= ARRAY_SIZE(log->entry))
 		log->full = 1;
-	}
+	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
+	WRITE_ONCE(e->debug_id_done, 0);
+	/*
+	 * write-barrier to synchronize access to e->debug_id_done.
+	 * We make sure the initialized 0 value is seen before
+	 * memset() other fields are zeroed by memset.
+	 */
+	smp_wmb();
+	memset(e, 0, sizeof(*e));
 	return e;
 }
 
 struct binder_context {
 	struct binder_node *binder_context_mgr_node;
+	struct mutex context_mgr_node_lock;
+
 	kuid_t binder_context_mgr_uid;
 	const char *name;
 };
@@ -237,11 +276,20 @@ struct binder_device {
 	struct binder_context context;
 };
 
+/**
+ * struct binder_work - work enqueued on a worklist
+ * @entry:             node enqueued on list
+ * @type:              type of work to be performed
+ *
+ * There are separate work lists for proc, thread, and node (async).
+ */
 struct binder_work {
 	struct list_head entry;
+
 	enum {
 		BINDER_WORK_TRANSACTION = 1,
 		BINDER_WORK_TRANSACTION_COMPLETE,
+		BINDER_WORK_RETURN_ERROR,
 		BINDER_WORK_NODE,
 		BINDER_WORK_DEAD_BINDER,
 		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
@@ -249,8 +297,72 @@ struct binder_work {
 	} type;
 };
 
+struct binder_error {
+	struct binder_work work;
+	uint32_t cmd;
+};
+
+/**
+ * struct binder_node - binder node bookkeeping
+ * @debug_id:             unique ID for debugging
+ *                        (invariant after initialized)
+ * @lock:                 lock for node fields
+ * @work:                 worklist element for node work
+ *                        (protected by @proc->inner_lock)
+ * @rb_node:              element for proc->nodes tree
+ *                        (protected by @proc->inner_lock)
+ * @dead_node:            element for binder_dead_nodes list
+ *                        (protected by binder_dead_nodes_lock)
+ * @proc:                 binder_proc that owns this node
+ *                        (invariant after initialized)
+ * @refs:                 list of references on this node
+ *                        (protected by @lock)
+ * @internal_strong_refs: used to take strong references when
+ *                        initiating a transaction
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @local_weak_refs:      weak user refs from local process
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @local_strong_refs:    strong user refs from local process
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @tmp_refs:             temporary kernel refs
+ *                        (protected by @proc->inner_lock while @proc
+ *                        is valid, and by binder_dead_nodes_lock
+ *                        if @proc is NULL. During inc/dec and node release
+ *                        it is also protected by @lock to provide safety
+ *                        as the node dies and @proc becomes NULL)
+ * @ptr:                  userspace pointer for node
+ *                        (invariant, no lock needed)
+ * @cookie:               userspace cookie for node
+ *                        (invariant, no lock needed)
+ * @has_strong_ref:       userspace notified of strong ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @pending_strong_ref:   userspace has acked notification of strong ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @has_weak_ref:         userspace notified of weak ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @pending_weak_ref:     userspace has acked notification of weak ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @has_async_transaction: async transaction to node in progress
+ *                        (protected by @lock)
+ * @accept_fds:           file descriptor operations supported for node
+ *                        (invariant after initialized)
+ * @min_priority:         minimum scheduling priority
+ *                        (invariant after initialized)
+ * @async_todo:           list of async work items
+ *                        (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder nodes.
+ */
 struct binder_node {
 	int debug_id;
+	spinlock_t lock;
 	struct binder_work work;
 	union {
 		struct rb_node rb_node;
@@ -261,88 +373,167 @@ struct binder_node {
 	int internal_strong_refs;
 	int local_weak_refs;
 	int local_strong_refs;
+	int tmp_refs;
 	binder_uintptr_t ptr;
 	binder_uintptr_t cookie;
-	unsigned has_strong_ref:1;
-	unsigned pending_strong_ref:1;
-	unsigned has_weak_ref:1;
-	unsigned pending_weak_ref:1;
-	unsigned has_async_transaction:1;
-	unsigned accept_fds:1;
-	unsigned min_priority:8;
+	struct {
+		/*
+		 * bitfield elements protected by
+		 * proc inner_lock
+		 */
+		u8 has_strong_ref:1;
+		u8 pending_strong_ref:1;
+		u8 has_weak_ref:1;
+		u8 pending_weak_ref:1;
+	};
+	struct {
+		/*
+		 * invariant after initialization
+		 */
+		u8 accept_fds:1;
+		u8 min_priority;
+	};
+	bool has_async_transaction;
 	struct list_head async_todo;
 };
 
 struct binder_ref_death {
+	/**
+	 * @work: worklist element for death notifications
+	 *        (protected by inner_lock of the proc that
+	 *        this ref belongs to)
+	 */
 	struct binder_work work;
 	binder_uintptr_t cookie;
 };
 
+/**
+ * struct binder_ref_data - binder_ref counts and id
+ * @debug_id:        unique ID for the ref
+ * @desc:            unique userspace handle for ref
+ * @strong:          strong ref count (debugging only if not locked)
+ * @weak:            weak ref count (debugging only if not locked)
+ *
+ * Structure to hold ref count and ref id information. Since
+ * the actual ref can only be accessed with a lock, this structure
+ * is used to return information about the ref to callers of
+ * ref inc/dec functions.
+ */
+struct binder_ref_data {
+	int debug_id;
+	uint32_t desc;
+	int strong;
+	int weak;
+};
+
+/**
+ * struct binder_ref - struct to track references on nodes
+ * @data:        binder_ref_data containing id, handle, and current refcounts
+ * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
+ * @rb_node_node: node for lookup by @node in proc's rb_tree
+ * @node_entry:  list entry for node->refs list in target node
+ *               (protected by @node->lock)
+ * @proc:        binder_proc containing ref
+ * @node:        binder_node of target node. When cleaning up a
+ *               ref for deletion in binder_cleanup_ref, a non-NULL
+ *               @node indicates the node must be freed
+ * @death:       pointer to death notification (ref_death) if requested
+ *               (protected by @node->lock)
+ *
+ * Structure to track references from procA to target node (on procB). This
+ * structure is unsafe to access without holding @proc->outer_lock.
+ */
 struct binder_ref {
 	/* Lookups needed: */
 	/*   node + proc => ref (transaction) */
 	/*   desc + proc => ref (transaction, inc/dec ref) */
 	/*   node => refs + procs (proc exit) */
-	int debug_id;
+	struct binder_ref_data data;
 	struct rb_node rb_node_desc;
 	struct rb_node rb_node_node;
 	struct hlist_node node_entry;
 	struct binder_proc *proc;
 	struct binder_node *node;
-	uint32_t desc;
-	int strong;
-	int weak;
 	struct binder_ref_death *death;
 };
 
-struct binder_buffer {
-	struct list_head entry; /* free and allocated entries by address */
-	struct rb_node rb_node; /* free entry by size or allocated entry */
-				/* by address */
-	unsigned free:1;
-	unsigned allow_user_free:1;
-	unsigned async_transaction:1;
-	unsigned debug_id:29;
-
-	struct binder_transaction *transaction;
-
-	struct binder_node *target_node;
-	size_t data_size;
-	size_t offsets_size;
-	size_t extra_buffers_size;
-	uint8_t data[0];
-};
-
 enum binder_deferred_state {
 	BINDER_DEFERRED_PUT_FILES    = 0x01,
 	BINDER_DEFERRED_FLUSH        = 0x02,
 	BINDER_DEFERRED_RELEASE      = 0x04,
 };
 
+/**
+ * struct binder_proc - binder process bookkeeping
+ * @proc_node:            element for binder_procs list
+ * @threads:              rbtree of binder_threads in this proc
+ *                        (protected by @inner_lock)
+ * @nodes:                rbtree of binder nodes associated with
+ *                        this proc ordered by node->ptr
+ *                        (protected by @inner_lock)
+ * @refs_by_desc:         rbtree of refs ordered by ref->desc
+ *                        (protected by @outer_lock)
+ * @refs_by_node:         rbtree of refs ordered by ref->node
+ *                        (protected by @outer_lock)
+ * @waiting_threads:      threads currently waiting for proc work
+ *                        (protected by @inner_lock)
+ * @pid                   PID of group_leader of process
+ *                        (invariant after initialized)
+ * @tsk                   task_struct for group_leader of process
+ *                        (invariant after initialized)
+ * @files                 files_struct for process
+ *                        (invariant after initialized)
+ * @deferred_work_node:   element for binder_deferred_list
+ *                        (protected by binder_deferred_lock)
+ * @deferred_work:        bitmap of deferred work to perform
+ *                        (protected by binder_deferred_lock)
+ * @is_dead:              process is dead and awaiting free
+ *                        when outstanding transactions are cleaned up
+ *                        (protected by @inner_lock)
+ * @todo:                 list of work for this process
+ *                        (protected by @inner_lock)
+ * @wait:                 wait queue head to wait for proc work
+ *                        (invariant after initialized)
+ * @stats:                per-process binder statistics
+ *                        (atomics, no lock needed)
+ * @delivered_death:      list of delivered death notification
+ *                        (protected by @inner_lock)
+ * @max_threads:          cap on number of binder threads
+ *                        (protected by @inner_lock)
+ * @requested_threads:    number of binder threads requested but not
+ *                        yet started. In current implementation, can
+ *                        only be 0 or 1.
+ *                        (protected by @inner_lock)
+ * @requested_threads_started: number binder threads started
+ *                        (protected by @inner_lock)
+ * @tmp_ref:              temporary reference to indicate proc is in use
+ *                        (protected by @inner_lock)
+ * @default_priority:     default scheduler priority
+ *                        (invariant after initialized)
+ * @debugfs_entry:        debugfs node
+ * @alloc:                binder allocator bookkeeping
+ * @context:              binder_context for this proc
+ *                        (invariant after initialized)
+ * @inner_lock:           can nest under outer_lock and/or node lock
+ * @outer_lock:           no nesting under innor or node lock
+ *                        Lock order: 1) outer, 2) node, 3) inner
+ *
+ * Bookkeeping structure for binder processes
+ */
 struct binder_proc {
 	struct hlist_node proc_node;
 	struct rb_root threads;
 	struct rb_root nodes;
 	struct rb_root refs_by_desc;
 	struct rb_root refs_by_node;
+	struct list_head waiting_threads;
 	int pid;
-	struct vm_area_struct *vma;
-	struct mm_struct *vma_vm_mm;
 	struct task_struct *tsk;
 	struct files_struct *files;
 	struct hlist_node deferred_work_node;
 	int deferred_work;
-	void *buffer;
-	ptrdiff_t user_buffer_offset;
-
-	struct list_head buffers;
-	struct rb_root free_buffers;
-	struct rb_root allocated_buffers;
-	size_t free_async_space;
+	bool is_dead;
 
-	struct page **pages;
-	size_t buffer_size;
-	uint32_t buffer_free;
 	struct list_head todo;
 	wait_queue_head_t wait;
 	struct binder_stats stats;
@@ -350,10 +541,13 @@ struct binder_proc {
 	int max_threads;
 	int requested_threads;
 	int requested_threads_started;
-	int ready_threads;
+	int tmp_ref;
 	long default_priority;
 	struct dentry *debugfs_entry;
+	struct binder_alloc alloc;
 	struct binder_context *context;
+	spinlock_t inner_lock;
+	spinlock_t outer_lock;
 };
 
 enum {
@@ -362,22 +556,58 @@ enum {
 	BINDER_LOOPER_STATE_EXITED      = 0x04,
 	BINDER_LOOPER_STATE_INVALID     = 0x08,
 	BINDER_LOOPER_STATE_WAITING     = 0x10,
-	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+	BINDER_LOOPER_STATE_POLL        = 0x20,
 };
 
+/**
+ * struct binder_thread - binder thread bookkeeping
+ * @proc:                 binder process for this thread
+ *                        (invariant after initialization)
+ * @rb_node:              element for proc->threads rbtree
+ *                        (protected by @proc->inner_lock)
+ * @waiting_thread_node:  element for @proc->waiting_threads list
+ *                        (protected by @proc->inner_lock)
+ * @pid:                  PID for this thread
+ *                        (invariant after initialization)
+ * @looper:               bitmap of looping state
+ *                        (only accessed by this thread)
+ * @looper_needs_return:  looping thread needs to exit driver
+ *                        (no lock needed)
+ * @transaction_stack:    stack of in-progress transactions for this thread
+ *                        (protected by @proc->inner_lock)
+ * @todo:                 list of work to do for this thread
+ *                        (protected by @proc->inner_lock)
+ * @return_error:         transaction errors reported by this thread
+ *                        (only accessed by this thread)
+ * @reply_error:          transaction errors reported by target thread
+ *                        (protected by @proc->inner_lock)
+ * @wait:                 wait queue for thread work
+ * @stats:                per-thread statistics
+ *                        (atomics, no lock needed)
+ * @tmp_ref:              temporary reference to indicate thread is in use
+ *                        (atomic since @proc->inner_lock cannot
+ *                        always be acquired)
+ * @is_dead:              thread is dead and awaiting free
+ *                        when outstanding transactions are cleaned up
+ *                        (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder threads.
+ */
 struct binder_thread {
 	struct binder_proc *proc;
 	struct rb_node rb_node;
+	struct list_head waiting_thread_node;
 	int pid;
-	int looper;
+	int looper;              /* only modified by this thread */
+	bool looper_need_return; /* can be written by other thread */
 	struct binder_transaction *transaction_stack;
 	struct list_head todo;
-	uint32_t return_error; /* Write failed, return error code in read buf */
-	uint32_t return_error2; /* Write failed, return error code in read */
-		/* buffer. Used when sending a reply to a dead process that */
-		/* we are also waiting on */
+	struct binder_error return_error;
+	struct binder_error reply_error;
 	wait_queue_head_t wait;
 	struct binder_stats stats;
+	atomic_t tmp_ref;
+	bool is_dead;
 };
 
 struct binder_transaction {
@@ -397,10 +627,253 @@ struct binder_transaction {
 	long	priority;
 	long	saved_priority;
 	kuid_t	sender_euid;
+	/**
+	 * @lock:  protects @from, @to_proc, and @to_thread
+	 *
+	 * @from, @to_proc, and @to_thread can be set to NULL
+	 * during thread teardown
+	 */
+	spinlock_t lock;
 };
 
+/**
+ * binder_proc_lock() - Acquire outer lock for given binder_proc
+ * @proc:         struct binder_proc to acquire
+ *
+ * Acquires proc->outer_lock. Used to protect binder_ref
+ * structures associated with the given proc.
+ */
+#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
+static void
+_binder_proc_lock(struct binder_proc *proc, int line)
+{
+	binder_debug(BINDER_DEBUG_SPINLOCKS,
+		     "%s: line=%d\n", __func__, line);
+	spin_lock(&proc->outer_lock);
+}
+
+/**
+ * binder_proc_unlock() - Release spinlock for given binder_proc
+ * @proc:         struct binder_proc to acquire
+ *
+ * Release lock acquired via binder_proc_lock()
+ */
+#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
+static void
+_binder_proc_unlock(struct binder_proc *proc, int line)
+{
+	binder_debug(BINDER_DEBUG_SPINLOCKS,
+		     "%s: line=%d\n", __func__, line);
+	spin_unlock(&proc->outer_lock);
+}
+
+/**
+ * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
+ * @proc:         struct binder_proc to acquire
+ *
+ * Acquires proc->inner_lock. Used to protect todo lists
+ */
+#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
+static void
+_binder_inner_proc_lock(struct binder_proc *proc, int line)
+{
+	binder_debug(BINDER_DEBUG_SPINLOCKS,
+		     "%s: line=%d\n", __func__, line);
+	spin_lock(&proc->inner_lock);
+}
+
+/**
+ * binder_inner_proc_unlock() - Release inner lock for given binder_proc
+ * @proc:         struct binder_proc to acquire
+ *
+ * Release lock acquired via binder_inner_proc_lock()
+ */
+#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
+static void
+_binder_inner_proc_unlock(struct binder_proc *proc, int line)
+{
+	binder_debug(BINDER_DEBUG_SPINLOCKS,
+		     "%s: line=%d\n", __func__, line);
+	spin_unlock(&proc->inner_lock);
+}
+
+/**
+ * binder_node_lock() - Acquire spinlock for given binder_node
+ * @node:         struct binder_node to acquire
+ *
+ * Acquires node->lock. Used to protect binder_node fields
+ */
+#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
+static void
+_binder_node_lock(struct binder_node *node, int line)
+{
+	binder_debug(BINDER_DEBUG_SPINLOCKS,
+		     "%s: line=%d\n", __func__, line);
+	spin_lock(&node->lock);
+}
+
+/**
+ * binder_node_unlock() - Release spinlock for given binder_proc
+ * @node:         struct binder_node to acquire
+ *
+ * Release lock acquired via binder_node_lock()
+ */
+#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
+static void
+_binder_node_unlock(struct binder_node *node, int line)
+{
+	binder_debug(BINDER_DEBUG_SPINLOCKS,
+		     "%s: line=%d\n", __func__, line);
+	spin_unlock(&node->lock);
+}
+
+/**
+ * binder_node_inner_lock() - Acquire node and inner locks
+ * @node:         struct binder_node to acquire
+ *
+ * Acquires node->lock. If node->proc also acquires
+ * proc->inner_lock. Used to protect binder_node fields
+ */
+#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
+static void
+_binder_node_inner_lock(struct binder_node *node, int line)
+{
+	binder_debug(BINDER_DEBUG_SPINLOCKS,
+		     "%s: line=%d\n", __func__, line);
+	spin_lock(&node->lock);
+	if (node->proc)
+		binder_inner_proc_lock(node->proc);
+}
+
+/**
+ * binder_node_unlock() - Release node and inner locks
+ * @node:         struct binder_node to acquire
+ *
+ * Release lock acquired via binder_node_lock()
+ */
+#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
+static void
+_binder_node_inner_unlock(struct binder_node *node, int line)
+{
+	struct binder_proc *proc = node->proc;
+
+	binder_debug(BINDER_DEBUG_SPINLOCKS,
+		     "%s: line=%d\n", __func__, line);
+	if (proc)
+		binder_inner_proc_unlock(proc);
+	spin_unlock(&node->lock);
+}
+
+static bool binder_worklist_empty_ilocked(struct list_head *list)
+{
+	return list_empty(list);
+}
+
+/**
+ * binder_worklist_empty() - Check if no items on the work list
+ * @proc:       binder_proc associated with list
+ * @list:	list to check
+ *
+ * Return: true if there are no items on list, else false
+ */
+static bool binder_worklist_empty(struct binder_proc *proc,
+				  struct list_head *list)
+{
+	bool ret;
+
+	binder_inner_proc_lock(proc);
+	ret = binder_worklist_empty_ilocked(list);
+	binder_inner_proc_unlock(proc);
+	return ret;
+}
+
+static void
+binder_enqueue_work_ilocked(struct binder_work *work,
+			   struct list_head *target_list)
+{
+	BUG_ON(target_list == NULL);
+	BUG_ON(work->entry.next && !list_empty(&work->entry));
+	list_add_tail(&work->entry, target_list);
+}
+
+/**
+ * binder_enqueue_work() - Add an item to the work list
+ * @proc:         binder_proc associated with list
+ * @work:         struct binder_work to add to list
+ * @target_list:  list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ */
+static void
+binder_enqueue_work(struct binder_proc *proc,
+		    struct binder_work *work,
+		    struct list_head *target_list)
+{
+	binder_inner_proc_lock(proc);
+	binder_enqueue_work_ilocked(work, target_list);
+	binder_inner_proc_unlock(proc);
+}
+
+static void
+binder_dequeue_work_ilocked(struct binder_work *work)
+{
+	list_del_init(&work->entry);
+}
+
+/**
+ * binder_dequeue_work() - Removes an item from the work list
+ * @proc:         binder_proc associated with list
+ * @work:         struct binder_work to remove from list
+ *
+ * Removes the specified work item from whatever list it is on.
+ * Can safely be called if work is not on any list.
+ */
+static void
+binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
+{
+	binder_inner_proc_lock(proc);
+	binder_dequeue_work_ilocked(work);
+	binder_inner_proc_unlock(proc);
+}
+
+static struct binder_work *binder_dequeue_work_head_ilocked(
+					struct list_head *list)
+{
+	struct binder_work *w;
+
+	w = list_first_entry_or_null(list, struct binder_work, entry);
+	if (w)
+		list_del_init(&w->entry);
+	return w;
+}
+
+/**
+ * binder_dequeue_work_head() - Dequeues the item at head of list
+ * @proc:         binder_proc associated with list
+ * @list:         list to dequeue head
+ *
+ * Removes the head of the list if there are items on the list
+ *
+ * Return: pointer dequeued binder_work, NULL if list was empty
+ */
+static struct binder_work *binder_dequeue_work_head(
+					struct binder_proc *proc,
+					struct list_head *list)
+{
+	struct binder_work *w;
+
+	binder_inner_proc_lock(proc);
+	w = binder_dequeue_work_head_ilocked(list);
+	binder_inner_proc_unlock(proc);
+	return w;
+}
+
 static void
 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+static void binder_free_thread(struct binder_thread *thread);
+static void binder_free_proc(struct binder_proc *proc);
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
 
 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
 {
@@ -451,484 +924,206 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
 	return retval;
 	return retval;
 }
 }
 
 
-static inline void binder_lock(const char *tag)
+static bool binder_has_work_ilocked(struct binder_thread *thread,
+				    bool do_proc_work)
 {
-	trace_binder_lock(tag);
-	mutex_lock(&binder_main_lock);
-	trace_binder_locked(tag);
+	return !binder_worklist_empty_ilocked(&thread->todo) ||
+		thread->looper_need_return ||
+		(do_proc_work &&
+		 !binder_worklist_empty_ilocked(&thread->proc->todo));
 }
 
-static inline void binder_unlock(const char *tag)
+static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
 {
-	trace_binder_unlock(tag);
-	mutex_unlock(&binder_main_lock);
-}
+	bool has_work;
 
-static void binder_set_nice(long nice)
-{
-	long min_nice;
+	binder_inner_proc_lock(thread->proc);
+	has_work = binder_has_work_ilocked(thread, do_proc_work);
+	binder_inner_proc_unlock(thread->proc);
 
-	if (can_nice(current, nice)) {
-		set_user_nice(current, nice);
-		return;
-	}
-	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
-	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
-		     "%d: nice value %ld not allowed use %ld instead\n",
-		      current->pid, nice, min_nice);
-	set_user_nice(current, min_nice);
-	if (min_nice <= MAX_NICE)
-		return;
-	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
+	return has_work;
 }
 
-static size_t binder_buffer_size(struct binder_proc *proc,
-				 struct binder_buffer *buffer)
+static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
 {
-	if (list_is_last(&buffer->entry, &proc->buffers))
-		return proc->buffer + proc->buffer_size - (void *)buffer->data;
-	return (size_t)list_entry(buffer->entry.next,
-			  struct binder_buffer, entry) - (size_t)buffer->data;
+	return !thread->transaction_stack &&
+		binder_worklist_empty_ilocked(&thread->todo) &&
+		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
+				   BINDER_LOOPER_STATE_REGISTERED));
 }
 
-static void binder_insert_free_buffer(struct binder_proc *proc,
-				      struct binder_buffer *new_buffer)
+static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
+					       bool sync)
 {
-	struct rb_node **p = &proc->free_buffers.rb_node;
-	struct rb_node *parent = NULL;
-	struct binder_buffer *buffer;
-	size_t buffer_size;
-	size_t new_buffer_size;
+	struct rb_node *n;
+	struct binder_thread *thread;
+
+	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+		thread = rb_entry(n, struct binder_thread, rb_node);
+		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
+		    binder_available_for_proc_work_ilocked(thread)) {
+			if (sync)
+				wake_up_interruptible_sync(&thread->wait);
+			else
+				wake_up_interruptible(&thread->wait);
+		}
+	}
+}
 
-	BUG_ON(!new_buffer->free);
+/**
+ * binder_select_thread_ilocked() - selects a thread for doing proc work.
+ * @proc:	process to select a thread from
+ *
+ * Note that calling this function moves the thread off the waiting_threads
+ * list, so it can only be woken up by the caller of this function, or a
+ * signal. Therefore, callers *should* always wake up the thread this function
+ * returns.
+ *
+ * Return:	If there's a thread currently waiting for process work,
+ *		returns that thread. Otherwise returns NULL.
+ */
+static struct binder_thread *
+binder_select_thread_ilocked(struct binder_proc *proc)
+{
+	struct binder_thread *thread;
 
-	new_buffer_size = binder_buffer_size(proc, new_buffer);
+	assert_spin_locked(&proc->inner_lock);
+	thread = list_first_entry_or_null(&proc->waiting_threads,
+					  struct binder_thread,
+					  waiting_thread_node);
 
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: add free buffer, size %zd, at %p\n",
-		      proc->pid, new_buffer_size, new_buffer);
+	if (thread)
+		list_del_init(&thread->waiting_thread_node);
 
-	while (*p) {
-		parent = *p;
-		buffer = rb_entry(parent, struct binder_buffer, rb_node);
-		BUG_ON(!buffer->free);
+	return thread;
+}
 
-		buffer_size = binder_buffer_size(proc, buffer);
+/**
+ * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
+ * @proc:	process to wake up a thread in
+ * @thread:	specific thread to wake-up (may be NULL)
+ * @sync:	whether to do a synchronous wake-up
+ *
+ * This function wakes up a thread in the @proc process.
+ * The caller may provide a specific thread to wake-up in
+ * the @thread parameter. If @thread is NULL, this function
+ * will wake up threads that have called poll().
+ *
+ * Note that for this function to work as expected, callers
+ * should first call binder_select_thread() to find a thread
+ * to handle the work (if they don't have a thread already),
+ * and pass the result into the @thread parameter.
+ */
+static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
+					 struct binder_thread *thread,
+					 bool sync)
+{
+	assert_spin_locked(&proc->inner_lock);
 
-		if (new_buffer_size < buffer_size)
-			p = &parent->rb_left;
+	if (thread) {
+		if (sync)
+			wake_up_interruptible_sync(&thread->wait);
 		else
-			p = &parent->rb_right;
+			wake_up_interruptible(&thread->wait);
+		return;
 	}
-	rb_link_node(&new_buffer->rb_node, parent, p);
-	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+
+	/* Didn't find a thread waiting for proc work; this can happen
+	 * in two scenarios:
+	 * 1. All threads are busy handling transactions
+	 *    In that case, one of those threads should call back into
+	 *    the kernel driver soon and pick up this work.
+	 * 2. Threads are using the (e)poll interface, in which case
+	 *    they may be blocked on the waitqueue without having been
+	 *    added to waiting_threads. For this case, we just iterate
+	 *    over all threads not handling transaction work, and
+	 *    wake them all up. We wake all because we don't know whether
+	 *    a thread that called into (e)poll is handling non-binder
+	 *    work currently.
+	 */
+	binder_wakeup_poll_threads_ilocked(proc, sync);
 }
 
-static void binder_insert_allocated_buffer(struct binder_proc *proc,
-					   struct binder_buffer *new_buffer)
+static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
 {
-	struct rb_node **p = &proc->allocated_buffers.rb_node;
-	struct rb_node *parent = NULL;
-	struct binder_buffer *buffer;
+	struct binder_thread *thread = binder_select_thread_ilocked(proc);
 
-	BUG_ON(new_buffer->free);
+	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
+}
 
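binder_wakeup_proc_ilocked() above is the canonical pairing of binder_select_thread_ilocked() and binder_wakeup_thread_ilocked(). A hypothetical caller that wants a synchronous wake-up (handing the CPU straight to the woken thread) would follow the same shape with sync set:

	binder_inner_proc_lock(proc);
	thread = binder_select_thread_ilocked(proc);
	binder_wakeup_thread_ilocked(proc, thread, /* sync = */true);
	binder_inner_proc_unlock(proc);
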
-	while (*p) {
-		parent = *p;
-		buffer = rb_entry(parent, struct binder_buffer, rb_node);
-		BUG_ON(buffer->free);
+static void binder_set_nice(long nice)
+{
+	long min_nice;
 
-		if (new_buffer < buffer)
-			p = &parent->rb_left;
-		else if (new_buffer > buffer)
-			p = &parent->rb_right;
-		else
-			BUG();
+	if (can_nice(current, nice)) {
+		set_user_nice(current, nice);
+		return;
 	}
-	rb_link_node(&new_buffer->rb_node, parent, p);
-	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
+	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+		     "%d: nice value %ld not allowed use %ld instead\n",
+		      current->pid, nice, min_nice);
+	set_user_nice(current, min_nice);
+	if (min_nice <= MAX_NICE)
+		return;
+	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
 }
 
-static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
-						  uintptr_t user_ptr)
+static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
+						   binder_uintptr_t ptr)
 {
-	struct rb_node *n = proc->allocated_buffers.rb_node;
-	struct binder_buffer *buffer;
-	struct binder_buffer *kern_ptr;
+	struct rb_node *n = proc->nodes.rb_node;
+	struct binder_node *node;
 
-	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
-		- offsetof(struct binder_buffer, data));
+	assert_spin_locked(&proc->inner_lock);
 
 	while (n) {
-		buffer = rb_entry(n, struct binder_buffer, rb_node);
-		BUG_ON(buffer->free);
+		node = rb_entry(n, struct binder_node, rb_node);
 
-		if (kern_ptr < buffer)
+		if (ptr < node->ptr)
 			n = n->rb_left;
-		else if (kern_ptr > buffer)
+		else if (ptr > node->ptr)
 			n = n->rb_right;
-		else
-			return buffer;
+		else {
+			/*
+			 * take an implicit weak reference
+			 * to ensure node stays alive until
+			 * call to binder_put_node()
+			 */
+			binder_inc_node_tmpref_ilocked(node);
+			return node;
+		}
 	}
 	return NULL;
 }
 
-static int binder_update_page_range(struct binder_proc *proc, int allocate,
-				    void *start, void *end,
-				    struct vm_area_struct *vma)
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+					   binder_uintptr_t ptr)
 {
-	void *page_addr;
-	unsigned long user_page_addr;
-	struct page **page;
-	struct mm_struct *mm;
-
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: %s pages %p-%p\n", proc->pid,
-		     allocate ? "allocate" : "free", start, end);
-
-	if (end <= start)
-		return 0;
+	struct binder_node *node;
 
-	trace_binder_update_page_range(proc, allocate, start, end);
+	binder_inner_proc_lock(proc);
+	node = binder_get_node_ilocked(proc, ptr);
+	binder_inner_proc_unlock(proc);
+	return node;
+}
 
-	if (vma)
-		mm = NULL;
-	else
-		mm = get_task_mm(proc->tsk);
-
-	if (mm) {
-		down_write(&mm->mmap_sem);
-		vma = proc->vma;
-		if (vma && mm != proc->vma_vm_mm) {
-			pr_err("%d: vma mm and task mm mismatch\n",
-				proc->pid);
-			vma = NULL;
-		}
-	}
-
-	if (allocate == 0)
-		goto free_range;
-
-	if (vma == NULL) {
-		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
-			proc->pid);
-		goto err_no_vma;
-	}
-
-	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
-		int ret;
-
-		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
-
-		BUG_ON(*page);
-		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
-		if (*page == NULL) {
-			pr_err("%d: binder_alloc_buf failed for page at %p\n",
-				proc->pid, page_addr);
-			goto err_alloc_page_failed;
-		}
-		ret = map_kernel_range_noflush((unsigned long)page_addr,
-					PAGE_SIZE, PAGE_KERNEL, page);
-		flush_cache_vmap((unsigned long)page_addr,
-				(unsigned long)page_addr + PAGE_SIZE);
-		if (ret != 1) {
-			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
-			       proc->pid, page_addr);
-			goto err_map_kernel_failed;
-		}
-		user_page_addr =
-			(uintptr_t)page_addr + proc->user_buffer_offset;
-		ret = vm_insert_page(vma, user_page_addr, page[0]);
-		if (ret) {
-			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
-			       proc->pid, user_page_addr);
-			goto err_vm_insert_page_failed;
-		}
-		/* vm_insert_page does not seem to increment the refcount */
-	}
-	if (mm) {
-		up_write(&mm->mmap_sem);
-		mmput(mm);
-	}
-	return 0;
-
-free_range:
-	for (page_addr = end - PAGE_SIZE; page_addr >= start;
-	     page_addr -= PAGE_SIZE) {
-		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
-		if (vma)
-			zap_page_range(vma, (uintptr_t)page_addr +
-				proc->user_buffer_offset, PAGE_SIZE);
-err_vm_insert_page_failed:
-		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-err_map_kernel_failed:
-		__free_page(*page);
-		*page = NULL;
-err_alloc_page_failed:
-		;
-	}
-err_no_vma:
-	if (mm) {
-		up_write(&mm->mmap_sem);
-		mmput(mm);
-	}
-	return -ENOMEM;
-}
-
-static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
-					      size_t data_size,
-					      size_t offsets_size,
-					      size_t extra_buffers_size,
-					      int is_async)
-{
-	struct rb_node *n = proc->free_buffers.rb_node;
-	struct binder_buffer *buffer;
-	size_t buffer_size;
-	struct rb_node *best_fit = NULL;
-	void *has_page_addr;
-	void *end_page_addr;
-	size_t size, data_offsets_size;
-
-	if (proc->vma == NULL) {
-		pr_err("%d: binder_alloc_buf, no vma\n",
-		       proc->pid);
-		return NULL;
-	}
-
-	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
-		ALIGN(offsets_size, sizeof(void *));
-
-	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
-		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
-				proc->pid, data_size, offsets_size);
-		return NULL;
-	}
-	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
-	if (size < data_offsets_size || size < extra_buffers_size) {
-		binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
-				  proc->pid, extra_buffers_size);
-		return NULL;
-	}
-	if (is_async &&
-	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
-			      proc->pid, size);
-		return NULL;
-	}
-
-	while (n) {
-		buffer = rb_entry(n, struct binder_buffer, rb_node);
-		BUG_ON(!buffer->free);
-		buffer_size = binder_buffer_size(proc, buffer);
-
-		if (size < buffer_size) {
-			best_fit = n;
-			n = n->rb_left;
-		} else if (size > buffer_size)
-			n = n->rb_right;
-		else {
-			best_fit = n;
-			break;
-		}
-	}
-	if (best_fit == NULL) {
-		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
-			proc->pid, size);
-		return NULL;
-	}
-	if (n == NULL) {
-		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
-		buffer_size = binder_buffer_size(proc, buffer);
-	}
-
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
-		      proc->pid, size, buffer, buffer_size);
-
-	has_page_addr =
-		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
-	if (n == NULL) {
-		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
-			buffer_size = size; /* no room for other buffers */
-		else
-			buffer_size = size + sizeof(struct binder_buffer);
-	}
-	end_page_addr =
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
-	if (end_page_addr > has_page_addr)
-		end_page_addr = has_page_addr;
-	if (binder_update_page_range(proc, 1,
-	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
-		return NULL;
-
-	rb_erase(best_fit, &proc->free_buffers);
-	buffer->free = 0;
-	binder_insert_allocated_buffer(proc, buffer);
-	if (buffer_size != size) {
-		struct binder_buffer *new_buffer = (void *)buffer->data + size;
-
-		list_add(&new_buffer->entry, &buffer->entry);
-		new_buffer->free = 1;
-		binder_insert_free_buffer(proc, new_buffer);
-	}
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: binder_alloc_buf size %zd got %p\n",
-		      proc->pid, size, buffer);
-	buffer->data_size = data_size;
-	buffer->offsets_size = offsets_size;
-	buffer->extra_buffers_size = extra_buffers_size;
-	buffer->async_transaction = is_async;
-	if (is_async) {
-		proc->free_async_space -= size + sizeof(struct binder_buffer);
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
-			     "%d: binder_alloc_buf size %zd async free %zd\n",
-			      proc->pid, size, proc->free_async_space);
-	}
-
-	return buffer;
-}
-
-static void *buffer_start_page(struct binder_buffer *buffer)
-{
-	return (void *)((uintptr_t)buffer & PAGE_MASK);
-}
-
-static void *buffer_end_page(struct binder_buffer *buffer)
-{
-	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
-}
-
-static void binder_delete_free_buffer(struct binder_proc *proc,
-				      struct binder_buffer *buffer)
-{
-	struct binder_buffer *prev, *next = NULL;
-	int free_page_end = 1;
-	int free_page_start = 1;
-
-	BUG_ON(proc->buffers.next == &buffer->entry);
-	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
-	BUG_ON(!prev->free);
-	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
-		free_page_start = 0;
-		if (buffer_end_page(prev) == buffer_end_page(buffer))
-			free_page_end = 0;
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %p share page with %p\n",
-			      proc->pid, buffer, prev);
-	}
-
-	if (!list_is_last(&buffer->entry, &proc->buffers)) {
-		next = list_entry(buffer->entry.next,
-				  struct binder_buffer, entry);
-		if (buffer_start_page(next) == buffer_end_page(buffer)) {
-			free_page_end = 0;
-			if (buffer_start_page(next) ==
-			    buffer_start_page(buffer))
-				free_page_start = 0;
-			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				     "%d: merge free, buffer %p share page with %p\n",
-				      proc->pid, buffer, prev);
-		}
-	}
-	list_del(&buffer->entry);
-	if (free_page_start || free_page_end) {
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
-			     proc->pid, buffer, free_page_start ? "" : " end",
-			     free_page_end ? "" : " start", prev, next);
-		binder_update_page_range(proc, 0, free_page_start ?
-			buffer_start_page(buffer) : buffer_end_page(buffer),
-			(free_page_end ? buffer_end_page(buffer) :
-			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
-	}
-}
-
-static void binder_free_buf(struct binder_proc *proc,
-			    struct binder_buffer *buffer)
-{
-	size_t size, buffer_size;
-
-	buffer_size = binder_buffer_size(proc, buffer);
-
-	size = ALIGN(buffer->data_size, sizeof(void *)) +
-		ALIGN(buffer->offsets_size, sizeof(void *)) +
-		ALIGN(buffer->extra_buffers_size, sizeof(void *));
-
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
-		      proc->pid, buffer, size, buffer_size);
-
-	BUG_ON(buffer->free);
-	BUG_ON(size > buffer_size);
-	BUG_ON(buffer->transaction != NULL);
-	BUG_ON((void *)buffer < proc->buffer);
-	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
-
-	if (buffer->async_transaction) {
-		proc->free_async_space += size + sizeof(struct binder_buffer);
-
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
-			     "%d: binder_free_buf size %zd async free %zd\n",
-			      proc->pid, size, proc->free_async_space);
-	}
-
-	binder_update_page_range(proc, 0,
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
-		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
-		NULL);
-	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
-	buffer->free = 1;
-	if (!list_is_last(&buffer->entry, &proc->buffers)) {
-		struct binder_buffer *next = list_entry(buffer->entry.next,
-						struct binder_buffer, entry);
-
-		if (next->free) {
-			rb_erase(&next->rb_node, &proc->free_buffers);
-			binder_delete_free_buffer(proc, next);
-		}
-	}
-	if (proc->buffers.next != &buffer->entry) {
-		struct binder_buffer *prev = list_entry(buffer->entry.prev,
-						struct binder_buffer, entry);
-
-		if (prev->free) {
-			binder_delete_free_buffer(proc, buffer);
-			rb_erase(&prev->rb_node, &proc->free_buffers);
-			buffer = prev;
-		}
-	}
-	binder_insert_free_buffer(proc, buffer);
-}
-
-static struct binder_node *binder_get_node(struct binder_proc *proc,
-					   binder_uintptr_t ptr)
-{
-	struct rb_node *n = proc->nodes.rb_node;
-	struct binder_node *node;
-
-	while (n) {
-		node = rb_entry(n, struct binder_node, rb_node);
-
-		if (ptr < node->ptr)
-			n = n->rb_left;
-		else if (ptr > node->ptr)
-			n = n->rb_right;
-		else
-			return node;
-	}
-	return NULL;
-}
-
-static struct binder_node *binder_new_node(struct binder_proc *proc,
-					   binder_uintptr_t ptr,
-					   binder_uintptr_t cookie)
+static struct binder_node *binder_init_node_ilocked(
+						struct binder_proc *proc,
+						struct binder_node *new_node,
+						struct flat_binder_object *fp)
 {
 	struct rb_node **p = &proc->nodes.rb_node;
 	struct rb_node *parent = NULL;
 	struct binder_node *node;
+	binder_uintptr_t ptr = fp ? fp->binder : 0;
+	binder_uintptr_t cookie = fp ? fp->cookie : 0;
+	__u32 flags = fp ? fp->flags : 0;
+
+	assert_spin_locked(&proc->inner_lock);
 
 	while (*p) {
+
 		parent = *p;
 		node = rb_entry(parent, struct binder_node, rb_node);
 
@@ -936,33 +1131,74 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
 			p = &(*p)->rb_left;
 		else if (ptr > node->ptr)
 			p = &(*p)->rb_right;
-		else
-			return NULL;
+		else {
+			/*
+			 * A matching node is already in
+			 * the rb tree. Abandon the init
+			 * and return it.
+			 */
+			binder_inc_node_tmpref_ilocked(node);
+			return node;
+		}
 	}
-
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (node == NULL)
-		return NULL;
+	node = new_node;
 	binder_stats_created(BINDER_STAT_NODE);
+	node->tmp_refs++;
 	rb_link_node(&node->rb_node, parent, p);
 	rb_insert_color(&node->rb_node, &proc->nodes);
-	node->debug_id = ++binder_last_id;
+	node->debug_id = atomic_inc_return(&binder_last_id);
 	node->proc = proc;
 	node->ptr = ptr;
 	node->cookie = cookie;
 	node->work.type = BINDER_WORK_NODE;
+	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+	spin_lock_init(&node->lock);
 	INIT_LIST_HEAD(&node->work.entry);
 	INIT_LIST_HEAD(&node->async_todo);
 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
 		     "%d:%d node %d u%016llx c%016llx created\n",
 		     proc->pid, current->pid, node->debug_id,
 		     (u64)node->ptr, (u64)node->cookie);
+
 	return node;
 }
 
-static int binder_inc_node(struct binder_node *node, int strong, int internal,
-			   struct list_head *target_list)
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+					   struct flat_binder_object *fp)
+{
+	struct binder_node *node;
+	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
+
+	if (!new_node)
+		return NULL;
+	binder_inner_proc_lock(proc);
+	node = binder_init_node_ilocked(proc, new_node, fp);
+	binder_inner_proc_unlock(proc);
+	if (node != new_node)
+		/*
+		 * The node was already added by another thread
+		 */
+		kfree(new_node);
+
+	return node;
+}
+
+static void binder_free_node(struct binder_node *node)
 {
+	kfree(node);
+	binder_stats_deleted(BINDER_STAT_NODE);
+}
+
+static int binder_inc_node_nilocked(struct binder_node *node, int strong,
+				    int internal,
+				    struct list_head *target_list)
+{
+	struct binder_proc *proc = node->proc;
+
+	assert_spin_locked(&node->lock);
+	if (proc)
+		assert_spin_locked(&proc->inner_lock);
 	if (strong) {
 		if (internal) {
 			if (target_list == NULL &&
@@ -978,8 +1214,8 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
 		} else
 			node->local_strong_refs++;
 		if (!node->has_strong_ref && target_list) {
-			list_del_init(&node->work.entry);
-			list_add_tail(&node->work.entry, target_list);
+			binder_dequeue_work_ilocked(&node->work);
+			binder_enqueue_work_ilocked(&node->work, target_list);
 		}
 	} else {
 		if (!internal)
@@ -990,58 +1226,169 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
 					node->debug_id);
 				return -EINVAL;
 			}
-			list_add_tail(&node->work.entry, target_list);
+			binder_enqueue_work_ilocked(&node->work, target_list);
 		}
 	}
 	return 0;
 }
 
-static int binder_dec_node(struct binder_node *node, int strong, int internal)
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+			   struct list_head *target_list)
+{
+	int ret;
+
+	binder_node_inner_lock(node);
+	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
+	binder_node_inner_unlock(node);
+
+	return ret;
+}
+
+static bool binder_dec_node_nilocked(struct binder_node *node,
+				     int strong, int internal)
 {
+	struct binder_proc *proc = node->proc;
+
+	assert_spin_locked(&node->lock);
+	if (proc)
+		assert_spin_locked(&proc->inner_lock);
 	if (strong) {
 		if (internal)
 			node->internal_strong_refs--;
 		else
 			node->local_strong_refs--;
 		if (node->local_strong_refs || node->internal_strong_refs)
-			return 0;
+			return false;
 	} else {
 		if (!internal)
 			node->local_weak_refs--;
-		if (node->local_weak_refs || !hlist_empty(&node->refs))
-			return 0;
+		if (node->local_weak_refs || node->tmp_refs ||
+				!hlist_empty(&node->refs))
+			return false;
 	}
-	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+
+	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
 		if (list_empty(&node->work.entry)) {
-			list_add_tail(&node->work.entry, &node->proc->todo);
-			wake_up_interruptible(&node->proc->wait);
+			binder_enqueue_work_ilocked(&node->work, &proc->todo);
+			binder_wakeup_proc_ilocked(proc);
 		}
 	} else {
 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
-		    !node->local_weak_refs) {
-			list_del_init(&node->work.entry);
-			if (node->proc) {
-				rb_erase(&node->rb_node, &node->proc->nodes);
+		    !node->local_weak_refs && !node->tmp_refs) {
+			if (proc) {
+				binder_dequeue_work_ilocked(&node->work);
+				rb_erase(&node->rb_node, &proc->nodes);
 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
 					     "refless node %d deleted\n",
 					     node->debug_id);
 			} else {
+				BUG_ON(!list_empty(&node->work.entry));
+				spin_lock(&binder_dead_nodes_lock);
+				/*
+				 * tmp_refs could have changed so
+				 * check it again
+				 */
+				if (node->tmp_refs) {
+					spin_unlock(&binder_dead_nodes_lock);
+					return false;
+				}
 				hlist_del(&node->dead_node);
+				spin_unlock(&binder_dead_nodes_lock);
 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
 					     "dead node %d deleted\n",
 					     node->debug_id);
 			}
-			kfree(node);
-			binder_stats_deleted(BINDER_STAT_NODE);
+			return true;
 		}
 	}
+	return false;
+}
 
-	return 0;
+static void binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+	bool free_node;
+
+	binder_node_inner_lock(node);
+	free_node = binder_dec_node_nilocked(node, strong, internal);
+	binder_node_inner_unlock(node);
+	if (free_node)
+		binder_free_node(node);
 }
 
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
+{
+	/*
+	 * No call to binder_inc_node() is needed since we
+	 * don't need to inform userspace of any changes to
+	 * tmp_refs
+	 */
+	node->tmp_refs++;
+}
 
-static struct binder_ref *binder_get_ref(struct binder_proc *proc,
-					 u32 desc, bool need_strong_ref)
+/**
+ * binder_inc_node_tmpref() - take a temporary reference on node
+ * @node:	node to reference
+ *
+ * Take reference on node to prevent the node from being freed
+ * while referenced only by a local variable. The inner lock is
+ * needed to serialize with the node work on the queue (which
+ * isn't needed after the node is dead). If the node is dead
+ * (node->proc is NULL), use binder_dead_nodes_lock to protect
+ * node->tmp_refs against dead-node-only cases where the node
+ * lock cannot be acquired (e.g. traversing the dead node list to
+ * print nodes)
+ */
+static void binder_inc_node_tmpref(struct binder_node *node)
+{
+	binder_node_lock(node);
+	if (node->proc)
+		binder_inner_proc_lock(node->proc);
+	else
+		spin_lock(&binder_dead_nodes_lock);
+	binder_inc_node_tmpref_ilocked(node);
+	if (node->proc)
+		binder_inner_proc_unlock(node->proc);
+	else
+		spin_unlock(&binder_dead_nodes_lock);
+	binder_node_unlock(node);
+}
+
+/**
+ * binder_dec_node_tmpref() - remove a temporary reference on node
+ * @node:	node to reference
+ *
+ * Release temporary reference on node taken via binder_inc_node_tmpref()
+ */
+static void binder_dec_node_tmpref(struct binder_node *node)
+{
+	bool free_node;
+
+	binder_node_inner_lock(node);
+	if (!node->proc)
+		spin_lock(&binder_dead_nodes_lock);
+	node->tmp_refs--;
+	BUG_ON(node->tmp_refs < 0);
+	if (!node->proc)
+		spin_unlock(&binder_dead_nodes_lock);
+	/*
+	 * Call binder_dec_node() to check if all refcounts are 0
+	 * and cleanup is needed. Calling with strong=0 and internal=1
+	 * causes no actual reference to be released in binder_dec_node().
+	 * If that changes, a change is needed here too.
+	 */
+	free_node = binder_dec_node_nilocked(node, 0, 1);
+	binder_node_inner_unlock(node);
+	if (free_node)
+		binder_free_node(node);
+}
+
+static void binder_put_node(struct binder_node *node)
+{
+	binder_dec_node_tmpref(node);
+}
+
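binder_get_node() and binder_put_node() bracket any use of a node pointer outside the proc locks: the lookup takes an implicit tmp_refs reference and the put drops it. A minimal, hypothetical caller:

	struct binder_node *node = binder_get_node(proc, ptr);

	if (node) {
		/* tmp_refs keeps the node alive across this window */
		pr_info("using node %d\n", node->debug_id);
		binder_put_node(node);
	}
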
+static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
+						 u32 desc, bool need_strong_ref)
 {
 	struct rb_node *n = proc->refs_by_desc.rb_node;
 	struct binder_ref *ref;
@@ -1049,11 +1396,11 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
 	while (n) {
 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
 
-		if (desc < ref->desc) {
+		if (desc < ref->data.desc) {
 			n = n->rb_left;
-		} else if (desc > ref->desc) {
+		} else if (desc > ref->data.desc) {
 			n = n->rb_right;
-		} else if (need_strong_ref && !ref->strong) {
+		} else if (need_strong_ref && !ref->data.strong) {
 			binder_user_error("tried to use weak ref as strong ref\n");
 			return NULL;
 		} else {
@@ -1063,14 +1410,34 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
 	return NULL;
 }
 
-static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
-						  struct binder_node *node)
+/**
+ * binder_get_ref_for_node_olocked() - get the ref associated with given node
+ * @proc:	binder_proc that owns the ref
+ * @node:	binder_node of target
+ * @new_ref:	newly allocated binder_ref to be initialized or %NULL
+ *
+ * Look up the ref for the given node and return it if it exists
+ *
+ * If it doesn't exist and the caller provides a newly allocated
+ * ref, initialize the fields of the newly allocated ref and insert
+ * into the given proc rb_trees and node refs list.
+ *
+ * Return:	the ref for node. It is possible that another thread
+ *		allocated/initialized the ref first in which case the
+ *		returned ref would be different from the passed-in
+ *		new_ref. new_ref must be kfree'd by the caller in
+ *		this case.
+ */
+static struct binder_ref *binder_get_ref_for_node_olocked(
+					struct binder_proc *proc,
+					struct binder_node *node,
+					struct binder_ref *new_ref)
 {
-	struct rb_node *n;
+	struct binder_context *context = proc->context;
 	struct rb_node **p = &proc->refs_by_node.rb_node;
 	struct rb_node *parent = NULL;
-	struct binder_ref *ref, *new_ref;
-	struct binder_context *context = proc->context;
+	struct binder_ref *ref;
+	struct rb_node *n;
 
 	while (*p) {
 		parent = *p;
@@ -1083,144 +1450,446 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
 		else
 			return ref;
 	}
-	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
-	if (new_ref == NULL)
+	if (!new_ref)
 		return NULL;
+
 	binder_stats_created(BINDER_STAT_REF);
-	new_ref->debug_id = ++binder_last_id;
+	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
 	new_ref->proc = proc;
 	new_ref->node = node;
 	rb_link_node(&new_ref->rb_node_node, parent, p);
 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
 
-	new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
-	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
-		ref = rb_entry(n, struct binder_ref, rb_node_desc);
-		if (ref->desc > new_ref->desc)
-			break;
-		new_ref->desc = ref->desc + 1;
+	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
+	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+		ref = rb_entry(n, struct binder_ref, rb_node_desc);
+		if (ref->data.desc > new_ref->data.desc)
+			break;
+		new_ref->data.desc = ref->data.desc + 1;
+	}
+
+	p = &proc->refs_by_desc.rb_node;
+	while (*p) {
+		parent = *p;
+		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
+
+		if (new_ref->data.desc < ref->data.desc)
+			p = &(*p)->rb_left;
+		else if (new_ref->data.desc > ref->data.desc)
+			p = &(*p)->rb_right;
+		else
+			BUG();
+	}
+	rb_link_node(&new_ref->rb_node_desc, parent, p);
+	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
+
+	binder_node_lock(node);
+	hlist_add_head(&new_ref->node_entry, &node->refs);
+
+	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+		     "%d new ref %d desc %d for node %d\n",
+		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
+		      node->debug_id);
+	binder_node_unlock(node);
+	return new_ref;
+}
+
+static void binder_cleanup_ref_olocked(struct binder_ref *ref)
+{
+	bool delete_node = false;
+
+	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+		     "%d delete ref %d desc %d for node %d\n",
+		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
+		      ref->node->debug_id);
+
+	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
+	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
+
+	binder_node_inner_lock(ref->node);
+	if (ref->data.strong)
+		binder_dec_node_nilocked(ref->node, 1, 1);
+
+	hlist_del(&ref->node_entry);
+	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
+	binder_node_inner_unlock(ref->node);
+	/*
+	 * Clear ref->node unless we want the caller to free the node
+	 */
+	if (!delete_node) {
+		/*
+		 * The caller uses ref->node to determine
+		 * whether the node needs to be freed. Clear
+		 * it since the node is still alive.
+		 */
+		ref->node = NULL;
+	}
+
+	if (ref->death) {
+		binder_debug(BINDER_DEBUG_DEAD_BINDER,
+			     "%d delete ref %d desc %d has death notification\n",
+			      ref->proc->pid, ref->data.debug_id,
+			      ref->data.desc);
+		binder_dequeue_work(ref->proc, &ref->death->work);
+		binder_stats_deleted(BINDER_STAT_DEATH);
+	}
+	binder_stats_deleted(BINDER_STAT_REF);
+}
+
+/**
+ * binder_inc_ref_olocked() - increment the ref for given handle
+ * @ref:         ref to be incremented
+ * @strong:      if true, strong increment, else weak
+ * @target_list: list to queue node work on
+ *
+ * Increment the ref. @ref->proc->outer_lock must be held on entry
+ *
+ * Return: 0, if successful, else errno
+ */
+static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
+				  struct list_head *target_list)
+{
+	int ret;
+
+	if (strong) {
+		if (ref->data.strong == 0) {
+			ret = binder_inc_node(ref->node, 1, 1, target_list);
+			if (ret)
+				return ret;
+		}
+		ref->data.strong++;
+	} else {
+		if (ref->data.weak == 0) {
+			ret = binder_inc_node(ref->node, 0, 1, target_list);
+			if (ret)
+				return ret;
+		}
+		ref->data.weak++;
+	}
+	return 0;
+}
+
+/**
+ * binder_dec_ref_olocked() - dec the ref for given handle
+ * @ref:	ref to be decremented
+ * @strong:	if true, strong decrement, else weak
+ *
+ * Decrement the ref.
+ *
+ * Return: true if ref is cleaned up and ready to be freed
+ */
+static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
+{
+	if (strong) {
+		if (ref->data.strong == 0) {
+			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
+					  ref->proc->pid, ref->data.debug_id,
+					  ref->data.desc, ref->data.strong,
+					  ref->data.weak);
+			return false;
+		}
+		ref->data.strong--;
+		if (ref->data.strong == 0)
+			binder_dec_node(ref->node, strong, 1);
+	} else {
+		if (ref->data.weak == 0) {
+			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
+					  ref->proc->pid, ref->data.debug_id,
+					  ref->data.desc, ref->data.strong,
+					  ref->data.weak);
+			return false;
+		}
+		ref->data.weak--;
+	}
+	if (ref->data.strong == 0 && ref->data.weak == 0) {
+		binder_cleanup_ref_olocked(ref);
+		return true;
+	}
+	return false;
+}
+
+/**
+ * binder_get_node_from_ref() - get the node from the given proc/desc
+ * @proc:	proc containing the ref
+ * @desc:	the handle associated with the ref
+ * @need_strong_ref: if true, only return node if ref is strong
+ * @rdata:	the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, return the associated binder_node
+ *
+ * Return: a binder_node, or NULL if the ref was not found or if
+ * @need_strong_ref is true and the ref holds no strong reference
+ */
+static struct binder_node *binder_get_node_from_ref(
+		struct binder_proc *proc,
+		u32 desc, bool need_strong_ref,
+		struct binder_ref_data *rdata)
+{
+	struct binder_node *node;
+	struct binder_ref *ref;
+
+	binder_proc_lock(proc);
+	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
+	if (!ref)
+		goto err_no_ref;
+	node = ref->node;
+	/*
+	 * Take an implicit reference on the node to ensure
+	 * it stays alive until the call to binder_put_node()
+	 */
+	binder_inc_node_tmpref(node);
+	if (rdata)
+		*rdata = ref->data;
+	binder_proc_unlock(proc);
+
+	return node;
+
+err_no_ref:
+	binder_proc_unlock(proc);
+	return NULL;
+}
+
+/**
+ * binder_free_ref() - free the binder_ref
+ * @ref:	ref to free
+ *
+ * Free the binder_ref. Free the binder_node indicated by ref->node
+ * (if non-NULL) and the binder_ref_death indicated by ref->death.
+ */
+static void binder_free_ref(struct binder_ref *ref)
+{
+	if (ref->node)
+		binder_free_node(ref->node);
+	kfree(ref->death);
+	kfree(ref);
+}
+
+/**
+ * binder_update_ref_for_handle() - inc/dec the ref for given handle
+ * @proc:	proc containing the ref
+ * @desc:	the handle associated with the ref
+ * @increment:	true=inc reference, false=dec reference
+ * @strong:	true=strong reference, false=weak reference
+ * @rdata:	the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, increment or decrement the ref
+ * according to "increment" arg.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_update_ref_for_handle(struct binder_proc *proc,
+		uint32_t desc, bool increment, bool strong,
+		struct binder_ref_data *rdata)
+{
+	int ret = 0;
+	struct binder_ref *ref;
+	bool delete_ref = false;
+
+	binder_proc_lock(proc);
+	ref = binder_get_ref_olocked(proc, desc, strong);
+	if (!ref) {
+		ret = -EINVAL;
+		goto err_no_ref;
 	}
+	if (increment)
+		ret = binder_inc_ref_olocked(ref, strong, NULL);
+	else
+		delete_ref = binder_dec_ref_olocked(ref, strong);
 
-	p = &proc->refs_by_desc.rb_node;
-	while (*p) {
-		parent = *p;
-		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
+	if (rdata)
+		*rdata = ref->data;
+	binder_proc_unlock(proc);
 
-		if (new_ref->desc < ref->desc)
-			p = &(*p)->rb_left;
-		else if (new_ref->desc > ref->desc)
-			p = &(*p)->rb_right;
-		else
-			BUG();
-	}
-	rb_link_node(&new_ref->rb_node_desc, parent, p);
-	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
-	if (node) {
-		hlist_add_head(&new_ref->node_entry, &node->refs);
+	if (delete_ref)
+		binder_free_ref(ref);
+	return ret;
 
-		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-			     "%d new ref %d desc %d for node %d\n",
-			      proc->pid, new_ref->debug_id, new_ref->desc,
-			      node->debug_id);
-	} else {
-		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-			     "%d new ref %d desc %d for dead node\n",
-			      proc->pid, new_ref->debug_id, new_ref->desc);
-	}
-	return new_ref;
+err_no_ref:
+	binder_proc_unlock(proc);
+	return ret;
+}
+
+/**
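A sketch of a strong increment on a user-supplied handle through this helper; the caller below is hypothetical:

	struct binder_ref_data rdata;
	int ret;

	ret = binder_update_ref_for_handle(proc, desc,
					   true /* increment */,
					   true /* strong */, &rdata);
	if (ret)
		binder_user_error("%d no ref for desc %u\n", proc->pid, desc);
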
+ * binder_dec_ref_for_handle() - dec the ref for given handle
+ * @proc:	proc containing the ref
+ * @desc:	the handle associated with the ref
+ * @strong:	true=strong reference, false=weak reference
+ * @rdata:	the id/refcount data for the ref
+ *
+ * Just calls binder_update_ref_for_handle() to decrement the ref.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_dec_ref_for_handle(struct binder_proc *proc,
+		uint32_t desc, bool strong, struct binder_ref_data *rdata)
+{
+	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
 }
 
-static void binder_delete_ref(struct binder_ref *ref)
+
+/**
+ * binder_inc_ref_for_node() - increment the ref for given proc/node
+ * @proc:	 proc containing the ref
+ * @node:	 target node
+ * @strong:	 true=strong reference, false=weak reference
+ * @target_list: worklist to use if node is incremented
+ * @rdata:	 the id/refcount data for the ref
+ *
+ * Given a proc and node, increment the ref. Create the ref if it
+ * doesn't already exist
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_inc_ref_for_node(struct binder_proc *proc,
+			struct binder_node *node,
+			bool strong,
+			struct list_head *target_list,
+			struct binder_ref_data *rdata)
 {
-	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-		     "%d delete ref %d desc %d for node %d\n",
-		      ref->proc->pid, ref->debug_id, ref->desc,
-		      ref->node->debug_id);
+	struct binder_ref *ref;
+	struct binder_ref *new_ref = NULL;
+	int ret = 0;
 
-	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
-	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
-	if (ref->strong)
-		binder_dec_node(ref->node, 1, 1);
-	hlist_del(&ref->node_entry);
-	binder_dec_node(ref->node, 0, 1);
-	if (ref->death) {
-		binder_debug(BINDER_DEBUG_DEAD_BINDER,
-			     "%d delete ref %d desc %d has death notification\n",
-			      ref->proc->pid, ref->debug_id, ref->desc);
-		list_del(&ref->death->work.entry);
-		kfree(ref->death);
-		binder_stats_deleted(BINDER_STAT_DEATH);
+	binder_proc_lock(proc);
+	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
+	if (!ref) {
+		binder_proc_unlock(proc);
+		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+		if (!new_ref)
+			return -ENOMEM;
+		binder_proc_lock(proc);
+		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
 	}
-	kfree(ref);
-	binder_stats_deleted(BINDER_STAT_REF);
+	ret = binder_inc_ref_olocked(ref, strong, target_list);
+	*rdata = ref->data;
+	binder_proc_unlock(proc);
+	if (new_ref && ref != new_ref)
+		/*
+		 * Another thread created the ref first so
+		 * free the one we allocated
+		 */
+		kfree(new_ref);
+	return ret;
 }
 
-static int binder_inc_ref(struct binder_ref *ref, int strong,
-			  struct list_head *target_list)
+static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
+					   struct binder_transaction *t)
 {
-	int ret;
+	BUG_ON(!target_thread);
+	assert_spin_locked(&target_thread->proc->inner_lock);
+	BUG_ON(target_thread->transaction_stack != t);
+	BUG_ON(target_thread->transaction_stack->from != target_thread);
+	target_thread->transaction_stack =
+		target_thread->transaction_stack->from_parent;
+	t->from = NULL;
+}
 
-	if (strong) {
-		if (ref->strong == 0) {
-			ret = binder_inc_node(ref->node, 1, 1, target_list);
-			if (ret)
-				return ret;
-		}
-		ref->strong++;
-	} else {
-		if (ref->weak == 0) {
-			ret = binder_inc_node(ref->node, 0, 1, target_list);
-			if (ret)
-				return ret;
-		}
-		ref->weak++;
+/**
+ * binder_thread_dec_tmpref() - decrement thread->tmp_ref
+ * @thread:	thread to decrement
+ *
+ * A thread needs to be kept alive while being used to create or
+ * handle a transaction. binder_get_txn_from() is used to safely
+ * extract t->from from a binder_transaction and keep the thread
+ * indicated by t->from from being freed. When done with that
+ * binder_thread, this function is called to decrement the
+ * tmp_ref and free if appropriate (thread has been released
+ * and no transaction being processed by the driver)
+ */
+static void binder_thread_dec_tmpref(struct binder_thread *thread)
+{
+	/*
+	 * atomic is used to protect the counter value while
+	 * it cannot reach zero or thread->is_dead is false
+	 */
+	binder_inner_proc_lock(thread->proc);
+	atomic_dec(&thread->tmp_ref);
+	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
+		binder_inner_proc_unlock(thread->proc);
+		binder_free_thread(thread);
+		return;
 	}
-	return 0;
+	binder_inner_proc_unlock(thread->proc);
 }
 
+/**
+ * binder_proc_dec_tmpref() - decrement proc->tmp_ref
+ * @proc:	proc to decrement
+ *
+ * A binder_proc needs to be kept alive while being used to create or
+ * handle a transaction. proc->tmp_ref is incremented when
+ * creating a new transaction or when the binder_proc is currently
+ * in use by threads that are being released. When done with the
+ * binder_proc, this function is called to decrement the counter and
+ * free the proc if appropriate (the proc has been released, all
+ * threads have been released, and it is not currently in use to
+ * process a transaction).
+ */
+static void binder_proc_dec_tmpref(struct binder_proc *proc)
+{
+	binder_inner_proc_lock(proc);
+	proc->tmp_ref--;
+	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
+			!proc->tmp_ref) {
+		binder_inner_proc_unlock(proc);
+		binder_free_proc(proc);
+		return;
+	}
+	binder_inner_proc_unlock(proc);
+}
 
-static int binder_dec_ref(struct binder_ref *ref, int strong)
+/**
+ * binder_get_txn_from() - safely extract the "from" thread in transaction
+ * @t:	binder transaction for t->from
+ *
+ * Atomically return the "from" thread and increment the tmp_ref
+ * count for the thread to ensure it stays alive until
+ * binder_thread_dec_tmpref() is called.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from(
+		struct binder_transaction *t)
 {
-	if (strong) {
-		if (ref->strong == 0) {
-			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
-					  ref->proc->pid, ref->debug_id,
-					  ref->desc, ref->strong, ref->weak);
-			return -EINVAL;
-		}
-		ref->strong--;
-		if (ref->strong == 0) {
-			int ret;
+	struct binder_thread *from;
 
-			ret = binder_dec_node(ref->node, strong, 1);
-			if (ret)
-				return ret;
-		}
-	} else {
-		if (ref->weak == 0) {
-			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
-					  ref->proc->pid, ref->debug_id,
-					  ref->desc, ref->strong, ref->weak);
-			return -EINVAL;
-		}
-		ref->weak--;
-	}
-	if (ref->strong == 0 && ref->weak == 0)
-		binder_delete_ref(ref);
-	return 0;
+	spin_lock(&t->lock);
+	from = t->from;
+	if (from)
+		atomic_inc(&from->tmp_ref);
+	spin_unlock(&t->lock);
+	return from;
 }
 
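binder_get_txn_from() returns with the target thread's tmp_ref raised, so every successful call must be balanced by binder_thread_dec_tmpref(). A hypothetical caller:

	struct binder_thread *from = binder_get_txn_from(t);

	if (from) {
		/* from cannot be freed until the tmp_ref is dropped */
		pr_info("reply target %d:%d\n", from->proc->pid, from->pid);
		binder_thread_dec_tmpref(from);
	}
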
-static void binder_pop_transaction(struct binder_thread *target_thread,
-				   struct binder_transaction *t)
+/**
+ * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
+ * @t:	binder transaction for t->from
+ *
+ * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
+ * to guarantee that the thread cannot be released while operating on it.
+ * The caller must call binder_inner_proc_unlock() to release the inner lock
+ * as well as call binder_thread_dec_tmpref() to release the reference.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from_and_acq_inner(
+		struct binder_transaction *t)
 {
-	if (target_thread) {
-		BUG_ON(target_thread->transaction_stack != t);
-		BUG_ON(target_thread->transaction_stack->from != target_thread);
-		target_thread->transaction_stack =
-			target_thread->transaction_stack->from_parent;
-		t->from = NULL;
+	struct binder_thread *from;
+
+	from = binder_get_txn_from(t);
+	if (!from)
+		return NULL;
+	binder_inner_proc_lock(from->proc);
+	if (t->from) {
+		BUG_ON(from != t->from);
+		return from;
 	}
-	t->need_reply = 0;
+	binder_inner_proc_unlock(from->proc);
+	binder_thread_dec_tmpref(from);
+	return NULL;
+}
+
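The kernel-doc contract above, spelled out as a sketch; binder_send_failed_reply() below follows exactly this shape:

	struct binder_thread *target = binder_get_txn_from_and_acq_inner(t);

	if (target) {
		/* target->proc->inner_lock is held here */
		binder_pop_transaction_ilocked(target, t);
		binder_inner_proc_unlock(target->proc);
		binder_thread_dec_tmpref(target);
	}
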
+static void binder_free_transaction(struct binder_transaction *t)
+{
 	if (t->buffer)
 		t->buffer->transaction = NULL;
 	kfree(t);
@@ -1235,30 +1904,28 @@ static void binder_send_failed_reply(struct binder_transaction *t,
 
 	BUG_ON(t->flags & TF_ONE_WAY);
 	while (1) {
-		target_thread = t->from;
+		target_thread = binder_get_txn_from_and_acq_inner(t);
 		if (target_thread) {
-			if (target_thread->return_error != BR_OK &&
-			   target_thread->return_error2 == BR_OK) {
-				target_thread->return_error2 =
-					target_thread->return_error;
-				target_thread->return_error = BR_OK;
-			}
-			if (target_thread->return_error == BR_OK) {
-				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
-					     "send failed reply for transaction %d to %d:%d\n",
-					      t->debug_id,
-					      target_thread->proc->pid,
-					      target_thread->pid);
-
-				binder_pop_transaction(target_thread, t);
-				target_thread->return_error = error_code;
+			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+				     "send failed reply for transaction %d to %d:%d\n",
+				      t->debug_id,
+				      target_thread->proc->pid,
+				      target_thread->pid);
+
+			binder_pop_transaction_ilocked(target_thread, t);
+			if (target_thread->reply_error.cmd == BR_OK) {
+				target_thread->reply_error.cmd = error_code;
+				binder_enqueue_work_ilocked(
+					&target_thread->reply_error.work,
+					&target_thread->todo);
 				wake_up_interruptible(&target_thread->wait);
 			} else {
-				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
-					target_thread->proc->pid,
-					target_thread->pid,
-					target_thread->return_error);
+				WARN(1, "Unexpected reply error: %u\n",
+						target_thread->reply_error.cmd);
 			}
+			binder_inner_proc_unlock(target_thread->proc);
+			binder_thread_dec_tmpref(target_thread);
+			binder_free_transaction(t);
 			return;
 		}
 		next = t->from_parent;
@@ -1267,7 +1934,7 @@ static void binder_send_failed_reply(struct binder_transaction *t,
 			     "send failed reply for transaction %d, target dead\n",
 			     t->debug_id);
 
-		binder_pop_transaction(target_thread, t);
+		binder_free_transaction(t);
 		if (next == NULL) {
 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
 				     "reply failed, no target thread at root\n");
@@ -1476,24 +2143,26 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 				     node->debug_id, (u64)node->ptr);
 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
 					0);
+			binder_put_node(node);
 		} break;
 		case BINDER_TYPE_HANDLE:
 		case BINDER_TYPE_WEAK_HANDLE: {
 			struct flat_binder_object *fp;
-			struct binder_ref *ref;
+			struct binder_ref_data rdata;
+			int ret;
 
 			fp = to_flat_binder_object(hdr);
-			ref = binder_get_ref(proc, fp->handle,
-					     hdr->type == BINDER_TYPE_HANDLE);
-			if (ref == NULL) {
-				pr_err("transaction release %d bad handle %d\n",
-				 debug_id, fp->handle);
+			ret = binder_dec_ref_for_handle(proc, fp->handle,
+				hdr->type == BINDER_TYPE_HANDLE, &rdata);
+
+			if (ret) {
+				pr_err("transaction release %d bad handle %d, ret = %d\n",
+				 debug_id, fp->handle, ret);
 				break;
 			}
 			binder_debug(BINDER_DEBUG_TRANSACTION,
-				     "        ref %d desc %d (node %d)\n",
-				     ref->debug_id, ref->desc, ref->node->debug_id);
-			binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
+				     "        ref %d desc %d\n",
+				     rdata.debug_id, rdata.desc);
 		} break;
 
 		case BINDER_TYPE_FD: {
@@ -1532,7 +2201,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 			 * back to kernel address space to access it
 			 */
 			parent_buffer = parent->buffer -
-				proc->user_buffer_offset;
+				binder_alloc_get_user_buffer_offset(
+						&proc->alloc);
 
 			fd_buf_size = sizeof(u32) * fda->num_fds;
 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -1564,102 +2234,122 @@ static int binder_translate_binder(struct flat_binder_object *fp,
 				   struct binder_thread *thread)
 {
 	struct binder_node *node;
-	struct binder_ref *ref;
 	struct binder_proc *proc = thread->proc;
 	struct binder_proc *target_proc = t->to_proc;
+	struct binder_ref_data rdata;
+	int ret = 0;
 
 	node = binder_get_node(proc, fp->binder);
 	if (!node) {
-		node = binder_new_node(proc, fp->binder, fp->cookie);
+		node = binder_new_node(proc, fp);
 		if (!node)
 			return -ENOMEM;
-
-		node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
-		node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
 	}
 	if (fp->cookie != node->cookie) {
 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
 				  proc->pid, thread->pid, (u64)fp->binder,
 				  node->debug_id, (u64)fp->cookie,
 				  (u64)node->cookie);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto done;
+	}
+	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+		ret = -EPERM;
+		goto done;
 	}
-	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
-		return -EPERM;
 
-	ref = binder_get_ref_for_node(target_proc, node);
-	if (!ref)
-		return -EINVAL;
+	ret = binder_inc_ref_for_node(target_proc, node,
+			fp->hdr.type == BINDER_TYPE_BINDER,
+			&thread->todo, &rdata);
+	if (ret)
+		goto done;
 
 	if (fp->hdr.type == BINDER_TYPE_BINDER)
 		fp->hdr.type = BINDER_TYPE_HANDLE;
 	else
 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
 	fp->binder = 0;
-	fp->handle = ref->desc;
+	fp->handle = rdata.desc;
 	fp->cookie = 0;
-	binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
 
-	trace_binder_transaction_node_to_ref(t, node, ref);
+	trace_binder_transaction_node_to_ref(t, node, &rdata);
 	binder_debug(BINDER_DEBUG_TRANSACTION,
 		     "        node %d u%016llx -> ref %d desc %d\n",
 		     node->debug_id, (u64)node->ptr,
-		     ref->debug_id, ref->desc);
-
-	return 0;
+		     rdata.debug_id, rdata.desc);
+done:
+	binder_put_node(node);
+	return ret;
 }
 
 static int binder_translate_handle(struct flat_binder_object *fp,
 				   struct binder_transaction *t,
 				   struct binder_thread *thread)
 {
-	struct binder_ref *ref;
 	struct binder_proc *proc = thread->proc;
 	struct binder_proc *proc = thread->proc;
 	struct binder_proc *target_proc = t->to_proc;
 	struct binder_proc *target_proc = t->to_proc;
+	struct binder_node *node;
+	struct binder_ref_data src_rdata;
+	int ret = 0;
 
 
-	ref = binder_get_ref(proc, fp->handle,
-			     fp->hdr.type == BINDER_TYPE_HANDLE);
-	if (!ref) {
+	node = binder_get_node_from_ref(proc, fp->handle,
+			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
+	if (!node) {
 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
 				  proc->pid, thread->pid, fp->handle);
 				  proc->pid, thread->pid, fp->handle);
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
-	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
-		return -EPERM;
+	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+		ret = -EPERM;
+		goto done;
+	}
 
 
-	if (ref->node->proc == target_proc) {
+	binder_node_lock(node);
+	if (node->proc == target_proc) {
 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
 			fp->hdr.type = BINDER_TYPE_BINDER;
 			fp->hdr.type = BINDER_TYPE_BINDER;
 		else
 		else
 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
-		fp->binder = ref->node->ptr;
-		fp->cookie = ref->node->cookie;
-		binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
-				0, NULL);
-		trace_binder_transaction_ref_to_node(t, ref);
+		fp->binder = node->ptr;
+		fp->cookie = node->cookie;
+		if (node->proc)
+			binder_inner_proc_lock(node->proc);
+		binder_inc_node_nilocked(node,
+					 fp->hdr.type == BINDER_TYPE_BINDER,
+					 0, NULL);
+		if (node->proc)
+			binder_inner_proc_unlock(node->proc);
+		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
 		binder_debug(BINDER_DEBUG_TRANSACTION,
 		binder_debug(BINDER_DEBUG_TRANSACTION,
 			     "        ref %d desc %d -> node %d u%016llx\n",
 			     "        ref %d desc %d -> node %d u%016llx\n",
-			     ref->debug_id, ref->desc, ref->node->debug_id,
-			     (u64)ref->node->ptr);
+			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
+			     (u64)node->ptr);
+		binder_node_unlock(node);
 	} else {
 	} else {
-		struct binder_ref *new_ref;
+		int ret;
+		struct binder_ref_data dest_rdata;
 
 
-		new_ref = binder_get_ref_for_node(target_proc, ref->node);
-		if (!new_ref)
-			return -EINVAL;
+		binder_node_unlock(node);
+		ret = binder_inc_ref_for_node(target_proc, node,
+				fp->hdr.type == BINDER_TYPE_HANDLE,
+				NULL, &dest_rdata);
+		if (ret)
+			goto done;
 
 
 		fp->binder = 0;
 		fp->binder = 0;
-		fp->handle = new_ref->desc;
+		fp->handle = dest_rdata.desc;
 		fp->cookie = 0;
 		fp->cookie = 0;
-		binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
-			       NULL);
-		trace_binder_transaction_ref_to_ref(t, ref, new_ref);
+		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
+						    &dest_rdata);
 		binder_debug(BINDER_DEBUG_TRANSACTION,
 		binder_debug(BINDER_DEBUG_TRANSACTION,
 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
-			     ref->debug_id, ref->desc, new_ref->debug_id,
-			     new_ref->desc, ref->node->debug_id);
+			     src_rdata.debug_id, src_rdata.desc,
+			     dest_rdata.debug_id, dest_rdata.desc,
+			     node->debug_id);
 	}
 	}
-	return 0;
+done:
+	binder_put_node(node);
+	return ret;
 }
 }
 
 
 static int binder_translate_fd(int fd,
@@ -1750,7 +2440,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
 	 * Since the parent was already fixed up, convert it
 	 * back to the kernel address space to access it
 	 */
-	parent_buffer = parent->buffer - target_proc->user_buffer_offset;
+	parent_buffer = parent->buffer -
+		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
 	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
 	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
@@ -1818,12 +2509,80 @@ static int binder_fixup_parent(struct binder_transaction *t,
 		return -EINVAL;
 	}
 	parent_buffer = (u8 *)(parent->buffer -
-			       target_proc->user_buffer_offset);
+			binder_alloc_get_user_buffer_offset(
+				&target_proc->alloc));
 	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
 
 	return 0;
 }
 
+/**
+ * binder_proc_transaction() - sends a transaction to a process and wakes it up
+ * @t:		transaction to send
+ * @proc:	process to send the transaction to
+ * @thread:	thread in @proc to send the transaction to (may be NULL)
+ *
+ * This function queues a transaction to the specified process. It will try
+ * to find a thread in the target process to handle the transaction and
+ * wake it up. If no thread is found, the work is queued to the proc
+ * waitqueue.
+ *
+ * If the @thread parameter is not NULL, the transaction is always queued
+ * to the waitlist of that specific thread.
+ *
+ * Return:	true if the transactions was successfully queued
+ *		false if the target process or thread is dead
+ */
+static bool binder_proc_transaction(struct binder_transaction *t,
+				    struct binder_proc *proc,
+				    struct binder_thread *thread)
+{
+	struct list_head *target_list = NULL;
+	struct binder_node *node = t->buffer->target_node;
+	bool oneway = !!(t->flags & TF_ONE_WAY);
+	bool wakeup = true;
+
+	BUG_ON(!node);
+	binder_node_lock(node);
+	if (oneway) {
+		BUG_ON(thread);
+		if (node->has_async_transaction) {
+			target_list = &node->async_todo;
+			wakeup = false;
+		} else {
+			node->has_async_transaction = 1;
+		}
+	}
+
+	binder_inner_proc_lock(proc);
+
+	if (proc->is_dead || (thread && thread->is_dead)) {
+		binder_inner_proc_unlock(proc);
+		binder_node_unlock(node);
+		return false;
+	}
+
+	if (!thread && !target_list)
+		thread = binder_select_thread_ilocked(proc);
+
+	if (thread)
+		target_list = &thread->todo;
+	else if (!target_list)
+		target_list = &proc->todo;
+	else
+		BUG_ON(target_list != &node->async_todo);
+
+	binder_enqueue_work_ilocked(&t->work, target_list);
+
+	if (wakeup)
+		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
+
+	binder_inner_proc_unlock(proc);
+	binder_node_unlock(node);
+
+	return true;
+}
+
 static void binder_transaction(struct binder_proc *proc,
 			       struct binder_thread *thread,
 			       struct binder_transaction_data *tr, int reply,
@@ -1835,19 +2594,21 @@ static void binder_transaction(struct binder_proc *proc,
 	binder_size_t *offp, *off_end, *off_start;
 	binder_size_t off_min;
 	u8 *sg_bufp, *sg_buf_end;
-	struct binder_proc *target_proc;
+	struct binder_proc *target_proc = NULL;
 	struct binder_thread *target_thread = NULL;
 	struct binder_node *target_node = NULL;
-	struct list_head *target_list;
-	wait_queue_head_t *target_wait;
 	struct binder_transaction *in_reply_to = NULL;
 	struct binder_transaction_log_entry *e;
-	uint32_t return_error;
+	uint32_t return_error = 0;
+	uint32_t return_error_param = 0;
+	uint32_t return_error_line = 0;
 	struct binder_buffer_object *last_fixup_obj = NULL;
 	binder_size_t last_fixup_min_off = 0;
 	struct binder_context *context = proc->context;
+	int t_debug_id = atomic_inc_return(&binder_last_id);
 
 	e = binder_transaction_log_add(&binder_transaction_log);
+	e->debug_id = t_debug_id;
 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
 	e->from_proc = proc->pid;
 	e->from_thread = thread->pid;
@@ -1857,29 +2618,40 @@ static void binder_transaction(struct binder_proc *proc,
 	e->context_name = proc->context->name;
 
 	if (reply) {
+		binder_inner_proc_lock(proc);
 		in_reply_to = thread->transaction_stack;
 		if (in_reply_to == NULL) {
+			binder_inner_proc_unlock(proc);
 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
 					  proc->pid, thread->pid);
 			return_error = BR_FAILED_REPLY;
+			return_error_param = -EPROTO;
+			return_error_line = __LINE__;
 			goto err_empty_call_stack;
 		}
-		binder_set_nice(in_reply_to->saved_priority);
 		if (in_reply_to->to_thread != thread) {
+			spin_lock(&in_reply_to->lock);
 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
 				proc->pid, thread->pid, in_reply_to->debug_id,
 				in_reply_to->to_proc ?
 				in_reply_to->to_proc->pid : 0,
 				in_reply_to->to_thread ?
 				in_reply_to->to_thread->pid : 0);
+			spin_unlock(&in_reply_to->lock);
+			binder_inner_proc_unlock(proc);
 			return_error = BR_FAILED_REPLY;
+			return_error_param = -EPROTO;
+			return_error_line = __LINE__;
 			in_reply_to = NULL;
 			goto err_bad_call_stack;
 		}
 		thread->transaction_stack = in_reply_to->to_parent;
-		target_thread = in_reply_to->from;
+		binder_inner_proc_unlock(proc);
+		binder_set_nice(in_reply_to->saved_priority);
+		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
 		if (target_thread == NULL) {
 			return_error = BR_DEAD_REPLY;
+			return_error_line = __LINE__;
 			goto err_dead_binder;
 		}
 		if (target_thread->transaction_stack != in_reply_to) {
@@ -1888,89 +2660,137 @@ static void binder_transaction(struct binder_proc *proc,
 				target_thread->transaction_stack ?
 				target_thread->transaction_stack->debug_id : 0,
 				in_reply_to->debug_id);
+			binder_inner_proc_unlock(target_thread->proc);
 			return_error = BR_FAILED_REPLY;
+			return_error_param = -EPROTO;
+			return_error_line = __LINE__;
 			in_reply_to = NULL;
 			target_thread = NULL;
 			goto err_dead_binder;
 		}
 		target_proc = target_thread->proc;
+		target_proc->tmp_ref++;
+		binder_inner_proc_unlock(target_thread->proc);
 	} else {
 		if (tr->target.handle) {
 			struct binder_ref *ref;
 
-			ref = binder_get_ref(proc, tr->target.handle, true);
-			if (ref == NULL) {
+			/*
+			 * There must already be a strong ref
+			 * on this node. If so, do a strong
+			 * increment on the node to ensure it
+			 * stays alive until the transaction is
+			 * done.
+			 */
+			binder_proc_lock(proc);
+			ref = binder_get_ref_olocked(proc, tr->target.handle,
+						     true);
+			if (ref) {
+				binder_inc_node(ref->node, 1, 0, NULL);
+				target_node = ref->node;
+			}
+			binder_proc_unlock(proc);
+			if (target_node == NULL) {
 				binder_user_error("%d:%d got transaction to invalid handle\n",
 				binder_user_error("%d:%d got transaction to invalid handle\n",
 					proc->pid, thread->pid);
 					proc->pid, thread->pid);
 				return_error = BR_FAILED_REPLY;
 				return_error = BR_FAILED_REPLY;
+				return_error_param = -EINVAL;
+				return_error_line = __LINE__;
 				goto err_invalid_target_handle;
 				goto err_invalid_target_handle;
 			}
 			}
-			target_node = ref->node;
 		} else {
 		} else {
+			mutex_lock(&context->context_mgr_node_lock);
 			target_node = context->binder_context_mgr_node;
 			target_node = context->binder_context_mgr_node;
 			if (target_node == NULL) {
 			if (target_node == NULL) {
 				return_error = BR_DEAD_REPLY;
 				return_error = BR_DEAD_REPLY;
+				mutex_unlock(&context->context_mgr_node_lock);
+				return_error_line = __LINE__;
 				goto err_no_context_mgr_node;
 				goto err_no_context_mgr_node;
 			}
 			}
+			binder_inc_node(target_node, 1, 0, NULL);
+			mutex_unlock(&context->context_mgr_node_lock);
 		}
 		}
 		e->to_node = target_node->debug_id;
 		e->to_node = target_node->debug_id;
+		binder_node_lock(target_node);
 		target_proc = target_node->proc;
 		target_proc = target_node->proc;
 		if (target_proc == NULL) {
 		if (target_proc == NULL) {
+			binder_node_unlock(target_node);
 			return_error = BR_DEAD_REPLY;
 			return_error = BR_DEAD_REPLY;
+			return_error_line = __LINE__;
 			goto err_dead_binder;
 			goto err_dead_binder;
 		}
 		}
+		binder_inner_proc_lock(target_proc);
+		target_proc->tmp_ref++;
+		binder_inner_proc_unlock(target_proc);
+		binder_node_unlock(target_node);
 		if (security_binder_transaction(proc->tsk,
 		if (security_binder_transaction(proc->tsk,
 						target_proc->tsk) < 0) {
 						target_proc->tsk) < 0) {
 			return_error = BR_FAILED_REPLY;
 			return_error = BR_FAILED_REPLY;
+			return_error_param = -EPERM;
+			return_error_line = __LINE__;
 			goto err_invalid_target_handle;
 			goto err_invalid_target_handle;
 		}
 		}
+		binder_inner_proc_lock(proc);
 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
 			struct binder_transaction *tmp;
 			struct binder_transaction *tmp;
 
 
 			tmp = thread->transaction_stack;
 			tmp = thread->transaction_stack;
 			if (tmp->to_thread != thread) {
 			if (tmp->to_thread != thread) {
+				spin_lock(&tmp->lock);
 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
 					proc->pid, thread->pid, tmp->debug_id,
 					proc->pid, thread->pid, tmp->debug_id,
 					tmp->to_proc ? tmp->to_proc->pid : 0,
 					tmp->to_proc ? tmp->to_proc->pid : 0,
 					tmp->to_thread ?
 					tmp->to_thread ?
 					tmp->to_thread->pid : 0);
 					tmp->to_thread->pid : 0);
+				spin_unlock(&tmp->lock);
+				binder_inner_proc_unlock(proc);
 				return_error = BR_FAILED_REPLY;
 				return_error = BR_FAILED_REPLY;
+				return_error_param = -EPROTO;
+				return_error_line = __LINE__;
 				goto err_bad_call_stack;
 				goto err_bad_call_stack;
 			}
 			}
 			while (tmp) {
 			while (tmp) {
-				if (tmp->from && tmp->from->proc == target_proc)
-					target_thread = tmp->from;
+				struct binder_thread *from;
+
+				spin_lock(&tmp->lock);
+				from = tmp->from;
+				if (from && from->proc == target_proc) {
+					atomic_inc(&from->tmp_ref);
+					target_thread = from;
+					spin_unlock(&tmp->lock);
+					break;
+				}
+				spin_unlock(&tmp->lock);
 				tmp = tmp->from_parent;
 			}
 		}
+		binder_inner_proc_unlock(proc);
 	}
-	if (target_thread) {
+	if (target_thread)
 		e->to_thread = target_thread->pid;
-		target_list = &target_thread->todo;
-		target_wait = &target_thread->wait;
-	} else {
-		target_list = &target_proc->todo;
-		target_wait = &target_proc->wait;
-	}
 	e->to_proc = target_proc->pid;
 
 	/* TODO: reuse incoming transaction for reply */
 	t = kzalloc(sizeof(*t), GFP_KERNEL);
 	if (t == NULL) {
 		return_error = BR_FAILED_REPLY;
+		return_error_param = -ENOMEM;
+		return_error_line = __LINE__;
 		goto err_alloc_t_failed;
 	}
 	binder_stats_created(BINDER_STAT_TRANSACTION);
+	spin_lock_init(&t->lock);
 
 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
 	if (tcomplete == NULL) {
 		return_error = BR_FAILED_REPLY;
+		return_error_param = -ENOMEM;
+		return_error_line = __LINE__;
 		goto err_alloc_tcomplete_failed;
 	}
 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
 
-	t->debug_id = ++binder_last_id;
-	e->debug_id = t->debug_id;
+	t->debug_id = t_debug_id;
 
 	if (reply)
 		binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -2004,11 +2824,18 @@ static void binder_transaction(struct binder_proc *proc,
 
 	trace_binder_transaction(reply, t, target_node);
 
-	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
 		tr->offsets_size, extra_buffers_size,
 		!reply && (t->flags & TF_ONE_WAY));
-	if (t->buffer == NULL) {
-		return_error = BR_FAILED_REPLY;
+	if (IS_ERR(t->buffer)) {
+		/*
+		 * -ESRCH indicates VMA cleared. The target is dying.
+		 */
+		return_error_param = PTR_ERR(t->buffer);
+		return_error = return_error_param == -ESRCH ?
+			BR_DEAD_REPLY : BR_FAILED_REPLY;
+		return_error_line = __LINE__;
+		t->buffer = NULL;
 		goto err_binder_alloc_buf_failed;
 	}
 	t->buffer->allow_user_free = 0;
@@ -2016,9 +2843,6 @@ static void binder_transaction(struct binder_proc *proc,
 	t->buffer->transaction = t;
 	t->buffer->target_node = target_node;
 	trace_binder_transaction_alloc_buf(t->buffer);
-	if (target_node)
-		binder_inc_node(target_node, 1, 0, NULL);
-
 	off_start = (binder_size_t *)(t->buffer->data +
 				      ALIGN(tr->data_size, sizeof(void *)));
 	offp = off_start;
@@ -2028,6 +2852,8 @@ static void binder_transaction(struct binder_proc *proc,
 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
 				proc->pid, thread->pid);
 		return_error = BR_FAILED_REPLY;
+		return_error_param = -EFAULT;
+		return_error_line = __LINE__;
 		goto err_copy_data_failed;
 	}
 	if (copy_from_user(offp, (const void __user *)(uintptr_t)
@@ -2035,12 +2861,16 @@ static void binder_transaction(struct binder_proc *proc,
 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
 				proc->pid, thread->pid);
 		return_error = BR_FAILED_REPLY;
+		return_error_param = -EFAULT;
+		return_error_line = __LINE__;
 		goto err_copy_data_failed;
 	}
 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
 				proc->pid, thread->pid, (u64)tr->offsets_size);
 		return_error = BR_FAILED_REPLY;
+		return_error_param = -EINVAL;
+		return_error_line = __LINE__;
 		goto err_bad_offset;
 	}
 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
@@ -2048,6 +2878,8 @@ static void binder_transaction(struct binder_proc *proc,
 				  proc->pid, thread->pid,
 				  (u64)extra_buffers_size);
 		return_error = BR_FAILED_REPLY;
+		return_error_param = -EINVAL;
+		return_error_line = __LINE__;
 		goto err_bad_offset;
 	}
 	off_end = (void *)off_start + tr->offsets_size;
@@ -2064,6 +2896,8 @@ static void binder_transaction(struct binder_proc *proc,
 					  (u64)off_min,
 					  (u64)t->buffer->data_size);
 			return_error = BR_FAILED_REPLY;
+			return_error_param = -EINVAL;
+			return_error_line = __LINE__;
 			goto err_bad_offset;
 		}
 
@@ -2078,6 +2912,8 @@ static void binder_transaction(struct binder_proc *proc,
 			ret = binder_translate_binder(fp, t, thread);
 			if (ret < 0) {
 				return_error = BR_FAILED_REPLY;
+				return_error_param = ret;
+				return_error_line = __LINE__;
 				goto err_translate_failed;
 			}
 		} break;
@@ -2089,6 +2925,8 @@ static void binder_transaction(struct binder_proc *proc,
 			ret = binder_translate_handle(fp, t, thread);
 			if (ret < 0) {
 				return_error = BR_FAILED_REPLY;
+				return_error_param = ret;
+				return_error_line = __LINE__;
 				goto err_translate_failed;
 			}
 		} break;
@@ -2100,6 +2938,8 @@ static void binder_transaction(struct binder_proc *proc,
 
 			if (target_fd < 0) {
 				return_error = BR_FAILED_REPLY;
+				return_error_param = target_fd;
+				return_error_line = __LINE__;
 				goto err_translate_failed;
 			}
 			fp->pad_binder = 0;
@@ -2116,6 +2956,8 @@ static void binder_transaction(struct binder_proc *proc,
 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
 						  proc->pid, thread->pid);
 				return_error = BR_FAILED_REPLY;
+				return_error_param = -EINVAL;
+				return_error_line = __LINE__;
 				goto err_bad_parent;
 			}
 			if (!binder_validate_fixup(t->buffer, off_start,
@@ -2125,12 +2967,16 @@ static void binder_transaction(struct binder_proc *proc,
 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
 						  proc->pid, thread->pid);
 				return_error = BR_FAILED_REPLY;
+				return_error_param = -EINVAL;
+				return_error_line = __LINE__;
 				goto err_bad_parent;
 			}
 			ret = binder_translate_fd_array(fda, parent, t, thread,
 							in_reply_to);
 			if (ret < 0) {
 				return_error = BR_FAILED_REPLY;
+				return_error_param = ret;
+				return_error_line = __LINE__;
 				goto err_translate_failed;
 			}
 			last_fixup_obj = parent;
@@ -2146,6 +2992,8 @@ static void binder_transaction(struct binder_proc *proc,
 				binder_user_error("%d:%d got transaction with too large buffer\n",
 						  proc->pid, thread->pid);
 				return_error = BR_FAILED_REPLY;
+				return_error_param = -EINVAL;
+				return_error_line = __LINE__;
 				goto err_bad_offset;
 			}
 			if (copy_from_user(sg_bufp,
@@ -2153,12 +3001,15 @@ static void binder_transaction(struct binder_proc *proc,
 					   bp->buffer, bp->length)) {
 				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
 						  proc->pid, thread->pid);
+				return_error_param = -EFAULT;
 				return_error = BR_FAILED_REPLY;
+				return_error_line = __LINE__;
 				goto err_copy_data_failed;
 			}
 			/* Fixup buffer pointer to target proc address space */
 			bp->buffer = (uintptr_t)sg_bufp +
-				target_proc->user_buffer_offset;
+				binder_alloc_get_user_buffer_offset(
+						&target_proc->alloc);
 			sg_bufp += ALIGN(bp->length, sizeof(u64));
 
 			ret = binder_fixup_parent(t, thread, bp, off_start,
@@ -2167,6 +3018,8 @@ static void binder_transaction(struct binder_proc *proc,
 						  last_fixup_min_off);
 			if (ret < 0) {
 				return_error = BR_FAILED_REPLY;
+				return_error_param = ret;
+				return_error_line = __LINE__;
 				goto err_translate_failed;
 			}
 			last_fixup_obj = bp;
@@ -2176,38 +3029,60 @@ static void binder_transaction(struct binder_proc *proc,
 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
 				proc->pid, thread->pid, hdr->type);
 			return_error = BR_FAILED_REPLY;
+			return_error_param = -EINVAL;
+			return_error_line = __LINE__;
 			goto err_bad_object_type;
 		}
 	}
+	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+	binder_enqueue_work(proc, tcomplete, &thread->todo);
+	t->work.type = BINDER_WORK_TRANSACTION;
+
 	if (reply) {
+		binder_inner_proc_lock(target_proc);
+		if (target_thread->is_dead) {
+			binder_inner_proc_unlock(target_proc);
+			goto err_dead_proc_or_thread;
+		}
 		BUG_ON(t->buffer->async_transaction != 0);
-		binder_pop_transaction(target_thread, in_reply_to);
+		binder_pop_transaction_ilocked(target_thread, in_reply_to);
+		binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+		binder_inner_proc_unlock(target_proc);
+		wake_up_interruptible_sync(&target_thread->wait);
+		binder_free_transaction(in_reply_to);
 	} else if (!(t->flags & TF_ONE_WAY)) {
 		BUG_ON(t->buffer->async_transaction != 0);
+		binder_inner_proc_lock(proc);
 		t->need_reply = 1;
 		t->from_parent = thread->transaction_stack;
 		thread->transaction_stack = t;
+		binder_inner_proc_unlock(proc);
+		if (!binder_proc_transaction(t, target_proc, target_thread)) {
+			binder_inner_proc_lock(proc);
+			binder_pop_transaction_ilocked(thread, t);
+			binder_inner_proc_unlock(proc);
+			goto err_dead_proc_or_thread;
+		}
 	} else {
 		BUG_ON(target_node == NULL);
 		BUG_ON(t->buffer->async_transaction != 1);
-		if (target_node->has_async_transaction) {
-			target_list = &target_node->async_todo;
-			target_wait = NULL;
-		} else
-			target_node->has_async_transaction = 1;
-	}
-	t->work.type = BINDER_WORK_TRANSACTION;
-	list_add_tail(&t->work.entry, target_list);
-	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
-	list_add_tail(&tcomplete->entry, &thread->todo);
-	if (target_wait) {
-		if (reply || !(t->flags & TF_ONE_WAY))
-			wake_up_interruptible_sync(target_wait);
-		else
-			wake_up_interruptible(target_wait);
+		if (!binder_proc_transaction(t, target_proc, NULL))
+			goto err_dead_proc_or_thread;
 	}
+	if (target_thread)
+		binder_thread_dec_tmpref(target_thread);
+	binder_proc_dec_tmpref(target_proc);
+	/*
+	 * write barrier to synchronize with initialization
+	 * of log entry
+	 */
+	smp_wmb();
+	WRITE_ONCE(e->debug_id_done, t_debug_id);
 	return;
 
+err_dead_proc_or_thread:
+	return_error = BR_DEAD_REPLY;
+	return_error_line = __LINE__;
 err_translate_failed:
 err_bad_object_type:
 err_bad_offset:
@@ -2215,8 +3090,9 @@ err_bad_parent:
 err_copy_data_failed:
 	trace_binder_transaction_failed_buffer_release(t->buffer);
 	binder_transaction_buffer_release(target_proc, t->buffer, offp);
+	target_node = NULL;
 	t->buffer->transaction = NULL;
-	binder_free_buf(target_proc, t->buffer);
+	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
 err_binder_alloc_buf_failed:
 	kfree(tcomplete);
 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
@@ -2229,24 +3105,49 @@ err_empty_call_stack:
 err_dead_binder:
 err_invalid_target_handle:
 err_no_context_mgr_node:
+	if (target_thread)
+		binder_thread_dec_tmpref(target_thread);
+	if (target_proc)
+		binder_proc_dec_tmpref(target_proc);
+	if (target_node)
+		binder_dec_node(target_node, 1, 0);
+
 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
-		     "%d:%d transaction failed %d, size %lld-%lld\n",
-		     proc->pid, thread->pid, return_error,
-		     (u64)tr->data_size, (u64)tr->offsets_size);
+		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
+		     proc->pid, thread->pid, return_error, return_error_param,
+		     (u64)tr->data_size, (u64)tr->offsets_size,
+		     return_error_line);
 
 	{
 		struct binder_transaction_log_entry *fe;
 
+		e->return_error = return_error;
+		e->return_error_param = return_error_param;
+		e->return_error_line = return_error_line;
 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
 		*fe = *e;
+		/*
+		 * write barrier to synchronize with initialization
+		 * of log entry
+		 */
+		smp_wmb();
+		WRITE_ONCE(e->debug_id_done, t_debug_id);
+		WRITE_ONCE(fe->debug_id_done, t_debug_id);
 	}
 
-	BUG_ON(thread->return_error != BR_OK);
+	BUG_ON(thread->return_error.cmd != BR_OK);
 	if (in_reply_to) {
-		thread->return_error = BR_TRANSACTION_COMPLETE;
+		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
+		binder_enqueue_work(thread->proc,
+				    &thread->return_error.work,
+				    &thread->todo);
 		binder_send_failed_reply(in_reply_to, return_error);
-	} else
-		thread->return_error = return_error;
+	} else {
+		thread->return_error.cmd = return_error;
+		binder_enqueue_work(thread->proc,
+				    &thread->return_error.work,
+				    &thread->todo);
+	}
 }
 
 static int binder_thread_write(struct binder_proc *proc,
@@ -2260,15 +3161,17 @@ static int binder_thread_write(struct binder_proc *proc,
 	void __user *ptr = buffer + *consumed;
 	void __user *end = buffer + size;
 
-	while (ptr < end && thread->return_error == BR_OK) {
+	while (ptr < end && thread->return_error.cmd == BR_OK) {
+		int ret;
+
 		if (get_user(cmd, (uint32_t __user *)ptr))
 			return -EFAULT;
 		ptr += sizeof(uint32_t);
 		trace_binder_command(cmd);
 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
-			binder_stats.bc[_IOC_NR(cmd)]++;
-			proc->stats.bc[_IOC_NR(cmd)]++;
-			thread->stats.bc[_IOC_NR(cmd)]++;
+			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
+			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
+			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
 		}
 		switch (cmd) {
 		case BC_INCREFS:
@@ -2276,53 +3179,61 @@ static int binder_thread_write(struct binder_proc *proc,
 		case BC_RELEASE:
 		case BC_DECREFS: {
 			uint32_t target;
-			struct binder_ref *ref;
 			const char *debug_string;
+			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
+			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
+			struct binder_ref_data rdata;
 
 			if (get_user(target, (uint32_t __user *)ptr))
 				return -EFAULT;
+
 			ptr += sizeof(uint32_t);
-			if (target == 0 && context->binder_context_mgr_node &&
-			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
-				ref = binder_get_ref_for_node(proc,
-					context->binder_context_mgr_node);
-				if (ref->desc != target) {
-					binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
-						proc->pid, thread->pid,
-						ref->desc);
-				}
-			} else
-				ref = binder_get_ref(proc, target,
-						     cmd == BC_ACQUIRE ||
-						     cmd == BC_RELEASE);
-			if (ref == NULL) {
-				binder_user_error("%d:%d refcount change on invalid ref %d\n",
-					proc->pid, thread->pid, target);
-				break;
+			ret = -1;
+			if (increment && !target) {
+				struct binder_node *ctx_mgr_node;
+				mutex_lock(&context->context_mgr_node_lock);
+				ctx_mgr_node = context->binder_context_mgr_node;
+				if (ctx_mgr_node)
+					ret = binder_inc_ref_for_node(
+							proc, ctx_mgr_node,
+							strong, NULL, &rdata);
+				mutex_unlock(&context->context_mgr_node_lock);
+			}
+			if (ret)
+				ret = binder_update_ref_for_handle(
+						proc, target, increment, strong,
+						&rdata);
+			if (!ret && rdata.desc != target) {
+				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
+					proc->pid, thread->pid,
+					target, rdata.desc);
 			}
 			switch (cmd) {
 			case BC_INCREFS:
 				debug_string = "IncRefs";
-				binder_inc_ref(ref, 0, NULL);
 				break;
 			case BC_ACQUIRE:
 				debug_string = "Acquire";
-				binder_inc_ref(ref, 1, NULL);
 				break;
 			case BC_RELEASE:
 				debug_string = "Release";
-				binder_dec_ref(ref, 1);
 				break;
 			case BC_DECREFS:
 			default:
 				debug_string = "DecRefs";
-				binder_dec_ref(ref, 0);
+				break;
+			}
+			if (ret) {
+				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
+					proc->pid, thread->pid, debug_string,
+					strong, target, ret);
 				break;
 			}
 			binder_debug(BINDER_DEBUG_USER_REFS,
-				     "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
-				     proc->pid, thread->pid, debug_string, ref->debug_id,
-				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+				     "%d:%d %s ref %d desc %d s %d w %d\n",
+				     proc->pid, thread->pid, debug_string,
+				     rdata.debug_id, rdata.desc, rdata.strong,
+				     rdata.weak);
 			break;
 		}
 		case BC_INCREFS_DONE:
@@ -2330,6 +3241,7 @@ static int binder_thread_write(struct binder_proc *proc,
 			binder_uintptr_t node_ptr;
 			binder_uintptr_t cookie;
 			struct binder_node *node;
+			bool free_node;
 
 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
@@ -2354,13 +3266,17 @@ static int binder_thread_write(struct binder_proc *proc,
 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
 					(u64)node_ptr, node->debug_id,
 					(u64)node_ptr, node->debug_id,
 					(u64)cookie, (u64)node->cookie);
 					(u64)cookie, (u64)node->cookie);
+				binder_put_node(node);
 				break;
 				break;
 			}
 			}
+			binder_node_inner_lock(node);
 			if (cmd == BC_ACQUIRE_DONE) {
 			if (cmd == BC_ACQUIRE_DONE) {
 				if (node->pending_strong_ref == 0) {
 				if (node->pending_strong_ref == 0) {
 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
 						proc->pid, thread->pid,
 						proc->pid, thread->pid,
 						node->debug_id);
 						node->debug_id);
+					binder_node_inner_unlock(node);
+					binder_put_node(node);
 					break;
 					break;
 				}
 				}
 				node->pending_strong_ref = 0;
 				node->pending_strong_ref = 0;
@@ -2369,16 +3285,23 @@ static int binder_thread_write(struct binder_proc *proc,
 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
 						proc->pid, thread->pid,
 						proc->pid, thread->pid,
 						node->debug_id);
 						node->debug_id);
+					binder_node_inner_unlock(node);
+					binder_put_node(node);
 					break;
 					break;
 				}
 				}
 				node->pending_weak_ref = 0;
 				node->pending_weak_ref = 0;
 			}
 			}
-			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+			free_node = binder_dec_node_nilocked(node,
+					cmd == BC_ACQUIRE_DONE, 0);
+			WARN_ON(free_node);
 			binder_debug(BINDER_DEBUG_USER_REFS,
 			binder_debug(BINDER_DEBUG_USER_REFS,
-				     "%d:%d %s node %d ls %d lw %d\n",
+				     "%d:%d %s node %d ls %d lw %d tr %d\n",
 				     proc->pid, thread->pid,
 				     proc->pid, thread->pid,
 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
-				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
+				     node->debug_id, node->local_strong_refs,
+				     node->local_weak_refs, node->tmp_refs);
+			binder_node_inner_unlock(node);
+			binder_put_node(node);
 			break;
 			break;
 		}
 		}
 		case BC_ATTEMPT_ACQUIRE:
 		case BC_ATTEMPT_ACQUIRE:
@@ -2396,7 +3319,8 @@ static int binder_thread_write(struct binder_proc *proc,
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
 
-			buffer = binder_buffer_lookup(proc, data_ptr);
+			buffer = binder_alloc_prepare_to_free(&proc->alloc,
+							      data_ptr);
 			if (buffer == NULL) {
 				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
 					proc->pid, thread->pid, (u64)data_ptr);
@@ -2418,15 +3342,27 @@ static int binder_thread_write(struct binder_proc *proc,
 				buffer->transaction = NULL;
 			}
 			if (buffer->async_transaction && buffer->target_node) {
-				BUG_ON(!buffer->target_node->has_async_transaction);
-				if (list_empty(&buffer->target_node->async_todo))
-					buffer->target_node->has_async_transaction = 0;
-				else
-					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+				struct binder_node *buf_node;
+				struct binder_work *w;
+
+				buf_node = buffer->target_node;
+				binder_node_inner_lock(buf_node);
+				BUG_ON(!buf_node->has_async_transaction);
+				BUG_ON(buf_node->proc != proc);
+				w = binder_dequeue_work_head_ilocked(
+						&buf_node->async_todo);
+				if (!w) {
+					buf_node->has_async_transaction = 0;
+				} else {
+					binder_enqueue_work_ilocked(
+							w, &proc->todo);
+					binder_wakeup_proc_ilocked(proc);
+				}
+				binder_node_inner_unlock(buf_node);
 			}
 			trace_binder_transaction_buffer_release(buffer);
 			binder_transaction_buffer_release(proc, buffer, NULL);
-			binder_free_buf(proc, buffer);
+			binder_alloc_free_buf(&proc->alloc, buffer);
 			break;
 		}
 
@@ -2457,6 +3393,7 @@ static int binder_thread_write(struct binder_proc *proc,
 			binder_debug(BINDER_DEBUG_THREADS,
 				     "%d:%d BC_REGISTER_LOOPER\n",
 				     proc->pid, thread->pid);
+			binder_inner_proc_lock(proc);
 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
@@ -2470,6 +3407,7 @@ static int binder_thread_write(struct binder_proc *proc,
 				proc->requested_threads_started++;
 			}
 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+			binder_inner_proc_unlock(proc);
 			break;
 		case BC_ENTER_LOOPER:
 			binder_debug(BINDER_DEBUG_THREADS,
@@ -2494,7 +3432,7 @@ static int binder_thread_write(struct binder_proc *proc,
 			uint32_t target;
 			binder_uintptr_t cookie;
 			struct binder_ref *ref;
-			struct binder_ref_death *death;
+			struct binder_ref_death *death = NULL;
 
 			if (get_user(target, (uint32_t __user *)ptr))
 				return -EFAULT;
@@ -2502,7 +3440,29 @@ static int binder_thread_write(struct binder_proc *proc,
 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
-			ref = binder_get_ref(proc, target, false);
+			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+				/*
+				 * Allocate memory for death notification
+				 * before taking lock
+				 */
+				death = kzalloc(sizeof(*death), GFP_KERNEL);
+				if (death == NULL) {
+					WARN_ON(thread->return_error.cmd !=
+						BR_OK);
+					thread->return_error.cmd = BR_ERROR;
+					binder_enqueue_work(
+						thread->proc,
+						&thread->return_error.work,
+						&thread->todo);
+					binder_debug(
+						BINDER_DEBUG_FAILED_TRANSACTION,
+						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
+						proc->pid, thread->pid);
+					break;
+				}
+			}
+			binder_proc_lock(proc);
+			ref = binder_get_ref_olocked(proc, target, false);
 			if (ref == NULL) {
 				binder_user_error("%d:%d %s invalid ref %d\n",
 					proc->pid, thread->pid,
@@ -2510,6 +3470,8 @@ static int binder_thread_write(struct binder_proc *proc,
 					"BC_REQUEST_DEATH_NOTIFICATION" :
 					"BC_CLEAR_DEATH_NOTIFICATION",
 					target);
+				binder_proc_unlock(proc);
+				kfree(death);
 				break;
 			}
 
@@ -2519,21 +3481,18 @@ static int binder_thread_write(struct binder_proc *proc,
 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
 				     "BC_REQUEST_DEATH_NOTIFICATION" :
 				     "BC_CLEAR_DEATH_NOTIFICATION",
-				     (u64)cookie, ref->debug_id, ref->desc,
-				     ref->strong, ref->weak, ref->node->debug_id);
+				     (u64)cookie, ref->data.debug_id,
+				     ref->data.desc, ref->data.strong,
+				     ref->data.weak, ref->node->debug_id);
 
+			binder_node_lock(ref->node);
 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
 				if (ref->death) {
 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
 						proc->pid, thread->pid);
-					break;
-				}
-				death = kzalloc(sizeof(*death), GFP_KERNEL);
-				if (death == NULL) {
-					thread->return_error = BR_ERROR;
-					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
-						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
-						     proc->pid, thread->pid);
+					binder_node_unlock(ref->node);
+					binder_proc_unlock(proc);
+					kfree(death);
 					break;
 				}
 				binder_stats_created(BINDER_STAT_DEATH);
@@ -2542,17 +3501,19 @@ static int binder_thread_write(struct binder_proc *proc,
 				ref->death = death;
 				if (ref->node->proc == NULL) {
 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
-					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
-						list_add_tail(&ref->death->work.entry, &thread->todo);
-					} else {
-						list_add_tail(&ref->death->work.entry, &proc->todo);
-						wake_up_interruptible(&proc->wait);
-					}
+
+					binder_inner_proc_lock(proc);
+					binder_enqueue_work_ilocked(
+						&ref->death->work, &proc->todo);
+					binder_wakeup_proc_ilocked(proc);
+					binder_inner_proc_unlock(proc);
 				}
 			} else {
 				if (ref->death == NULL) {
 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
 						proc->pid, thread->pid);
+					binder_node_unlock(ref->node);
+					binder_proc_unlock(proc);
 					break;
 				}
 				death = ref->death;
@@ -2561,22 +3522,35 @@ static int binder_thread_write(struct binder_proc *proc,
 						proc->pid, thread->pid,
 						(u64)death->cookie,
 						(u64)cookie);
+					binder_node_unlock(ref->node);
+					binder_proc_unlock(proc);
 					break;
 				}
 				ref->death = NULL;
+				binder_inner_proc_lock(proc);
 				if (list_empty(&death->work.entry)) {
 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
-					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
-						list_add_tail(&death->work.entry, &thread->todo);
-					} else {
-						list_add_tail(&death->work.entry, &proc->todo);
-						wake_up_interruptible(&proc->wait);
+					if (thread->looper &
+					    (BINDER_LOOPER_STATE_REGISTERED |
+					     BINDER_LOOPER_STATE_ENTERED))
+						binder_enqueue_work_ilocked(
+								&death->work,
+								&thread->todo);
+					else {
+						binder_enqueue_work_ilocked(
+								&death->work,
+								&proc->todo);
+						binder_wakeup_proc_ilocked(
+								proc);
 					}
 					}
 				} else {
 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
 				}
+				binder_inner_proc_unlock(proc);
 			}
+			binder_node_unlock(ref->node);
+			binder_proc_unlock(proc);
 		} break;
 		case BC_DEAD_BINDER_DONE: {
 			struct binder_work *w;
 				return -EFAULT;
 				return -EFAULT;
 
 			ptr += sizeof(cookie);
-				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+			binder_inner_proc_lock(proc);
+			list_for_each_entry(w, &proc->delivered_death,
+					    entry) {
+				struct binder_ref_death *tmp_death =
+					container_of(w,
+						     struct binder_ref_death,
+						     work);
 
 
 				if (tmp_death->cookie == cookie) {
 					death = tmp_death;
@@ -2602,19 +3581,25 @@ static int binder_thread_write(struct binder_proc *proc,
 			if (death == NULL) {
 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
 					proc->pid, thread->pid, (u64)cookie);
+				binder_inner_proc_unlock(proc);
 				break;
 			}
-
-			list_del_init(&death->work.entry);
+			binder_dequeue_work_ilocked(&death->work);
 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
-				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
-					list_add_tail(&death->work.entry, &thread->todo);
-				} else {
-					list_add_tail(&death->work.entry, &proc->todo);
-					wake_up_interruptible(&proc->wait);
+				if (thread->looper &
+					(BINDER_LOOPER_STATE_REGISTERED |
+					 BINDER_LOOPER_STATE_ENTERED))
+					binder_enqueue_work_ilocked(
+						&death->work, &thread->todo);
+				else {
+					binder_enqueue_work_ilocked(
+							&death->work,
+							&proc->todo);
+					binder_wakeup_proc_ilocked(proc);
 				}
 			}
+			binder_inner_proc_unlock(proc);
 		} break;
 
 		default:
@@ -2632,23 +3617,79 @@ static void binder_stat_br(struct binder_proc *proc,
 {
 	trace_binder_return(cmd);
 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
-		binder_stats.br[_IOC_NR(cmd)]++;
-		proc->stats.br[_IOC_NR(cmd)]++;
-		thread->stats.br[_IOC_NR(cmd)]++;
+		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
+		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
+		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
 	}
 }
 
-static int binder_has_proc_work(struct binder_proc *proc,
-				struct binder_thread *thread)
+static int binder_has_thread_work(struct binder_thread *thread)
+{
+	return !binder_worklist_empty(thread->proc, &thread->todo) ||
+		thread->looper_need_return;
+}
+
+static int binder_put_node_cmd(struct binder_proc *proc,
+			       struct binder_thread *thread,
+			       void __user **ptrp,
+			       binder_uintptr_t node_ptr,
+			       binder_uintptr_t node_cookie,
+			       int node_debug_id,
+			       uint32_t cmd, const char *cmd_name)
 {
-	return !list_empty(&proc->todo) ||
-		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+	void __user *ptr = *ptrp;
+
+	if (put_user(cmd, (uint32_t __user *)ptr))
+		return -EFAULT;
+	ptr += sizeof(uint32_t);
+
+	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
+		return -EFAULT;
+	ptr += sizeof(binder_uintptr_t);
+
+	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
+		return -EFAULT;
+	ptr += sizeof(binder_uintptr_t);
+
+	binder_stat_br(proc, thread, cmd);
+	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
+		     proc->pid, thread->pid, cmd_name, node_debug_id,
+		     (u64)node_ptr, (u64)node_cookie);
+
+	*ptrp = ptr;
+	return 0;
 }
 
-static int binder_has_thread_work(struct binder_thread *thread)
+static int binder_wait_for_work(struct binder_thread *thread,
+				bool do_proc_work)
 {
-	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
-		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+	DEFINE_WAIT(wait);
+	struct binder_proc *proc = thread->proc;
+	int ret = 0;
+
+	freezer_do_not_count();
+	binder_inner_proc_lock(proc);
+	for (;;) {
+		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
+		if (binder_has_work_ilocked(thread, do_proc_work))
+			break;
+		if (do_proc_work)
+			list_add(&thread->waiting_thread_node,
+				 &proc->waiting_threads);
+		binder_inner_proc_unlock(proc);
+		schedule();
+		binder_inner_proc_lock(proc);
+		list_del_init(&thread->waiting_thread_node);
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+	finish_wait(&thread->wait, &wait);
+	binder_inner_proc_unlock(proc);
+	freezer_count();
+
+	return ret;
 }
 
 static int binder_thread_read(struct binder_proc *proc,
@@ -2670,37 +3711,15 @@ static int binder_thread_read(struct binder_proc *proc,
 	}
 
 retry:
-	wait_for_proc_work = thread->transaction_stack == NULL &&
-				list_empty(&thread->todo);
-
-	if (thread->return_error != BR_OK && ptr < end) {
-		if (thread->return_error2 != BR_OK) {
-			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
-				return -EFAULT;
-			ptr += sizeof(uint32_t);
-			binder_stat_br(proc, thread, thread->return_error2);
-			if (ptr == end)
-				goto done;
-			thread->return_error2 = BR_OK;
-		}
-		if (put_user(thread->return_error, (uint32_t __user *)ptr))
-			return -EFAULT;
-		ptr += sizeof(uint32_t);
-		binder_stat_br(proc, thread, thread->return_error);
-		thread->return_error = BR_OK;
-		goto done;
-	}
-
+	binder_inner_proc_lock(proc);
+	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
+	binder_inner_proc_unlock(proc);
 
 
 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
-	if (wait_for_proc_work)
-		proc->ready_threads++;
-
-	binder_unlock(__func__);
 
 
 	trace_binder_wait_for_work(wait_for_proc_work,
 	trace_binder_wait_for_work(wait_for_proc_work,
 				   !!thread->transaction_stack,
 				   !!thread->transaction_stack,
-				   !list_empty(&thread->todo));
+				   !binder_worklist_empty(proc, &thread->todo));
 	if (wait_for_proc_work) {
 	if (wait_for_proc_work) {
 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
 					BINDER_LOOPER_STATE_ENTERED))) {
 					BINDER_LOOPER_STATE_ENTERED))) {
@@ -2710,23 +3729,15 @@ retry:
 						 binder_stop_on_user_error < 2);
 		}
 		binder_set_nice(proc->default_priority);
-		if (non_block) {
-			if (!binder_has_proc_work(proc, thread))
-				ret = -EAGAIN;
-		} else
-			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
-	} else {
-		if (non_block) {
-			if (!binder_has_thread_work(thread))
-				ret = -EAGAIN;
-		} else
-			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
 	}

-	binder_lock(__func__);
+	if (non_block) {
+		if (!binder_has_work(thread, wait_for_proc_work))
+			ret = -EAGAIN;
+	} else {
+		ret = binder_wait_for_work(thread, wait_for_proc_work);
+	}

-	if (wait_for_proc_work)
-		proc->ready_threads--;
 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

 	if (ret)
@@ -2735,31 +3746,52 @@ retry:
 	while (1) {
 		uint32_t cmd;
 		struct binder_transaction_data tr;
-		struct binder_work *w;
+		struct binder_work *w = NULL;
+		struct list_head *list = NULL;
 		struct binder_transaction *t = NULL;
+		struct binder_thread *t_from;
+
+		binder_inner_proc_lock(proc);
+		if (!binder_worklist_empty_ilocked(&thread->todo))
+			list = &thread->todo;
+		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
+			   wait_for_proc_work)
+			list = &proc->todo;
+		else {
+			binder_inner_proc_unlock(proc);

-		if (!list_empty(&thread->todo)) {
-			w = list_first_entry(&thread->todo, struct binder_work,
-					     entry);
-		} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
-			w = list_first_entry(&proc->todo, struct binder_work,
-					     entry);
-		} else {
 			/* no data added */
-			if (ptr - buffer == 4 &&
-			    !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
+			if (ptr - buffer == 4 && !thread->looper_need_return)
 				goto retry;
 			break;
 		}

-		if (end - ptr < sizeof(tr) + 4)
+		if (end - ptr < sizeof(tr) + 4) {
+			binder_inner_proc_unlock(proc);
 			break;
+		}
+		w = binder_dequeue_work_head_ilocked(list);

 		switch (w->type) {
 		case BINDER_WORK_TRANSACTION: {
+			binder_inner_proc_unlock(proc);
 			t = container_of(w, struct binder_transaction, work);
 		} break;
+		case BINDER_WORK_RETURN_ERROR: {
+			struct binder_error *e = container_of(
+					w, struct binder_error, work);
+
+			WARN_ON(e->cmd == BR_OK);
+			binder_inner_proc_unlock(proc);
+			if (put_user(e->cmd, (uint32_t __user *)ptr))
+				return -EFAULT;
+			e->cmd = BR_OK;
+			ptr += sizeof(uint32_t);
+
+			binder_stat_br(proc, thread, e->cmd);
+		} break;
 		case BINDER_WORK_TRANSACTION_COMPLETE: {
+			binder_inner_proc_unlock(proc);
 			cmd = BR_TRANSACTION_COMPLETE;
 			if (put_user(cmd, (uint32_t __user *)ptr))
 				return -EFAULT;
@@ -2769,113 +3801,134 @@ retry:
 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
 				     proc->pid, thread->pid);
-
-			list_del(&w->entry);
 			kfree(w);
 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
 		} break;
 		case BINDER_WORK_NODE: {
 			struct binder_node *node = container_of(w, struct binder_node, work);
-			uint32_t cmd = BR_NOOP;
-			const char *cmd_name;
-			int strong = node->internal_strong_refs || node->local_strong_refs;
-			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
-
-			if (weak && !node->has_weak_ref) {
-				cmd = BR_INCREFS;
-				cmd_name = "BR_INCREFS";
+			int strong, weak;
+			binder_uintptr_t node_ptr = node->ptr;
+			binder_uintptr_t node_cookie = node->cookie;
+			int node_debug_id = node->debug_id;
+			int has_weak_ref;
+			int has_strong_ref;
+			void __user *orig_ptr = ptr;
+
+			BUG_ON(proc != node->proc);
+			strong = node->internal_strong_refs ||
+					node->local_strong_refs;
+			weak = !hlist_empty(&node->refs) ||
+					node->local_weak_refs ||
+					node->tmp_refs || strong;
+			has_strong_ref = node->has_strong_ref;
+			has_weak_ref = node->has_weak_ref;
+
+			if (weak && !has_weak_ref) {
 				node->has_weak_ref = 1;
 				node->pending_weak_ref = 1;
 				node->local_weak_refs++;
-			} else if (strong && !node->has_strong_ref) {
-				cmd = BR_ACQUIRE;
-				cmd_name = "BR_ACQUIRE";
+			}
+			if (strong && !has_strong_ref) {
 				node->has_strong_ref = 1;
 				node->pending_strong_ref = 1;
 				node->local_strong_refs++;
-			} else if (!strong && node->has_strong_ref) {
-				cmd = BR_RELEASE;
-				cmd_name = "BR_RELEASE";
+			}
+			if (!strong && has_strong_ref)
 				node->has_strong_ref = 0;
-			} else if (!weak && node->has_weak_ref) {
-				cmd = BR_DECREFS;
-				cmd_name = "BR_DECREFS";
+			if (!weak && has_weak_ref)
 				node->has_weak_ref = 0;
-			}
-			if (cmd != BR_NOOP) {
-				if (put_user(cmd, (uint32_t __user *)ptr))
-					return -EFAULT;
-				ptr += sizeof(uint32_t);
-				if (put_user(node->ptr,
-					     (binder_uintptr_t __user *)ptr))
-					return -EFAULT;
-				ptr += sizeof(binder_uintptr_t);
-				if (put_user(node->cookie,
-					     (binder_uintptr_t __user *)ptr))
-					return -EFAULT;
-				ptr += sizeof(binder_uintptr_t);
-
-				binder_stat_br(proc, thread, cmd);
-				binder_debug(BINDER_DEBUG_USER_REFS,
-					     "%d:%d %s %d u%016llx c%016llx\n",
-					     proc->pid, thread->pid, cmd_name,
-					     node->debug_id,
-					     (u64)node->ptr, (u64)node->cookie);
-			} else {
-				list_del_init(&w->entry);
-				if (!weak && !strong) {
-					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-						     "%d:%d node %d u%016llx c%016llx deleted\n",
-						     proc->pid, thread->pid,
-						     node->debug_id,
-						     (u64)node->ptr,
-						     (u64)node->cookie);
-					rb_erase(&node->rb_node, &proc->nodes);
-					kfree(node);
-					binder_stats_deleted(BINDER_STAT_NODE);
-				} else {
-					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-						     "%d:%d node %d u%016llx c%016llx state unchanged\n",
-						     proc->pid, thread->pid,
-						     node->debug_id,
-						     (u64)node->ptr,
-						     (u64)node->cookie);
-				}
-			}
+			if (!weak && !strong) {
+				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+					     "%d:%d node %d u%016llx c%016llx deleted\n",
+					     proc->pid, thread->pid,
+					     node_debug_id,
+					     (u64)node_ptr,
+					     (u64)node_cookie);
+				rb_erase(&node->rb_node, &proc->nodes);
+				binder_inner_proc_unlock(proc);
+				binder_node_lock(node);
+				/*
+				 * Acquire the node lock before freeing the
+				 * node to serialize with other threads that
+				 * may have been holding the node lock while
+				 * decrementing this node (avoids race where
+				 * this thread frees while the other thread
+				 * is unlocking the node after the final
+				 * decrement)
+				 */
+				binder_node_unlock(node);
+				binder_free_node(node);
+			} else
+				binder_inner_proc_unlock(proc);
+
+			if (weak && !has_weak_ref)
+				ret = binder_put_node_cmd(
+						proc, thread, &ptr, node_ptr,
+						node_cookie, node_debug_id,
+						BR_INCREFS, "BR_INCREFS");
+			if (!ret && strong && !has_strong_ref)
+				ret = binder_put_node_cmd(
+						proc, thread, &ptr, node_ptr,
+						node_cookie, node_debug_id,
+						BR_ACQUIRE, "BR_ACQUIRE");
+			if (!ret && !strong && has_strong_ref)
+				ret = binder_put_node_cmd(
+						proc, thread, &ptr, node_ptr,
+						node_cookie, node_debug_id,
+						BR_RELEASE, "BR_RELEASE");
+			if (!ret && !weak && has_weak_ref)
+				ret = binder_put_node_cmd(
+						proc, thread, &ptr, node_ptr,
+						node_cookie, node_debug_id,
+						BR_DECREFS, "BR_DECREFS");
+			if (orig_ptr == ptr)
+				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
+					     proc->pid, thread->pid,
+					     node_debug_id,
+					     (u64)node_ptr,
+					     (u64)node_cookie);
+			if (ret)
+				return ret;
 		} break;
 		case BINDER_WORK_DEAD_BINDER:
 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
 			struct binder_ref_death *death;
 			uint32_t cmd;
+			binder_uintptr_t cookie;

 			death = container_of(w, struct binder_ref_death, work);
 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
 			else
 				cmd = BR_DEAD_BINDER;
-			if (put_user(cmd, (uint32_t __user *)ptr))
-				return -EFAULT;
-			ptr += sizeof(uint32_t);
-			if (put_user(death->cookie,
-				     (binder_uintptr_t __user *)ptr))
-				return -EFAULT;
-			ptr += sizeof(binder_uintptr_t);
-			binder_stat_br(proc, thread, cmd);
+			cookie = death->cookie;
+
 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
 				     "%d:%d %s %016llx\n",
 				      proc->pid, thread->pid,
 				      cmd == BR_DEAD_BINDER ?
 				      "BR_DEAD_BINDER" :
 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
-				      (u64)death->cookie);
-
+				      (u64)cookie);
 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
-				list_del(&w->entry);
+				binder_inner_proc_unlock(proc);
 				kfree(death);
 				binder_stats_deleted(BINDER_STAT_DEATH);
-			} else
-				list_move(&w->entry, &proc->delivered_death);
+			} else {
+				binder_enqueue_work_ilocked(
+						w, &proc->delivered_death);
+				binder_inner_proc_unlock(proc);
+			}
+			if (put_user(cmd, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+			if (put_user(cookie,
+				     (binder_uintptr_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(binder_uintptr_t);
+			binder_stat_br(proc, thread, cmd);
 			if (cmd == BR_DEAD_BINDER)
 				goto done; /* DEAD_BINDER notifications can cause transactions */
 		} break;
@@ -2907,8 +3960,9 @@ retry:
 		tr.flags = t->flags;
 		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

-		if (t->from) {
-			struct task_struct *sender = t->from->proc->tsk;
+		t_from = binder_get_txn_from(t);
+		if (t_from) {
+			struct task_struct *sender = t_from->proc->tsk;

 			tr.sender_pid = task_tgid_nr_ns(sender,
 							task_active_pid_ns(current));
@@ -2918,18 +3972,24 @@ retry:

 		tr.data_size = t->buffer->data_size;
 		tr.offsets_size = t->buffer->offsets_size;
-		tr.data.ptr.buffer = (binder_uintptr_t)(
-					(uintptr_t)t->buffer->data +
-					proc->user_buffer_offset);
+		tr.data.ptr.buffer = (binder_uintptr_t)
+			((uintptr_t)t->buffer->data +
+			binder_alloc_get_user_buffer_offset(&proc->alloc));
 		tr.data.ptr.offsets = tr.data.ptr.buffer +
 					ALIGN(t->buffer->data_size,
 					    sizeof(void *));

-		if (put_user(cmd, (uint32_t __user *)ptr))
+		if (put_user(cmd, (uint32_t __user *)ptr)) {
+			if (t_from)
+				binder_thread_dec_tmpref(t_from);
 			return -EFAULT;
+		}
 		ptr += sizeof(uint32_t);
-		if (copy_to_user(ptr, &tr, sizeof(tr)))
+		if (copy_to_user(ptr, &tr, sizeof(tr))) {
+			if (t_from)
+				binder_thread_dec_tmpref(t_from);
 			return -EFAULT;
+		}
 		ptr += sizeof(tr);

 		trace_binder_transaction_received(t);
@@ -2939,21 +3999,22 @@ retry:
 			     proc->pid, thread->pid,
 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
 			     "BR_REPLY",
-			     t->debug_id, t->from ? t->from->proc->pid : 0,
-			     t->from ? t->from->pid : 0, cmd,
+			     t->debug_id, t_from ? t_from->proc->pid : 0,
+			     t_from ? t_from->pid : 0, cmd,
 			     t->buffer->data_size, t->buffer->offsets_size,
 			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

-		list_del(&t->work.entry);
+		if (t_from)
+			binder_thread_dec_tmpref(t_from);
 		t->buffer->allow_user_free = 1;
 		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+			binder_inner_proc_lock(thread->proc);
 			t->to_parent = thread->transaction_stack;
 			t->to_thread = thread;
 			thread->transaction_stack = t;
+			binder_inner_proc_unlock(thread->proc);
 		} else {
-			t->buffer->transaction = NULL;
-			kfree(t);
-			binder_stats_deleted(BINDER_STAT_TRANSACTION);
+			binder_free_transaction(t);
 		}
 		break;
 	}
@@ -2961,29 +4022,36 @@ retry:
 done:

 	*consumed = ptr - buffer;
-	if (proc->requested_threads + proc->ready_threads == 0 &&
+	binder_inner_proc_lock(proc);
+	if (proc->requested_threads == 0 &&
+	    list_empty(&thread->proc->waiting_threads) &&
 	    proc->requested_threads_started < proc->max_threads &&
 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
 	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
 	     /*spawn a new thread if we leave this out */) {
 		proc->requested_threads++;
+		binder_inner_proc_unlock(proc);
 		binder_debug(BINDER_DEBUG_THREADS,
 			     "%d:%d BR_SPAWN_LOOPER\n",
 			     proc->pid, thread->pid);
 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
 			return -EFAULT;
 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
-	}
+	} else
+		binder_inner_proc_unlock(proc);
 	return 0;
 }

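Everything binder_thread_read() emits is a flat stream of 32-bit BR_* codes, each followed by its command-specific payload, which user space walks after BINDER_WRITE_READ returns. A hedged user-space sketch of that consumer loop (simplified relative to what libbinder-style code does; only a few commands are handled, and the payload sizes follow from the put_user/copy_to_user calls in the diff above):

    #include <stdint.h>
    #include <string.h>
    #include <linux/android/binder.h>

    /* Walk the commands the kernel wrote into bwr.read_buffer. */
    static void drain_read_buffer(const char *buf, size_t consumed)
    {
    	const char *ptr = buf;
    	const char *end = buf + consumed;
    	uint32_t cmd;

    	while (ptr + sizeof(cmd) <= end) {
    		memcpy(&cmd, ptr, sizeof(cmd));
    		ptr += sizeof(cmd);

    		switch (cmd) {
    		case BR_NOOP:
    		case BR_TRANSACTION_COMPLETE:
    			break;				/* no payload */
    		case BR_INCREFS:
    		case BR_ACQUIRE:
    		case BR_RELEASE:
    		case BR_DECREFS:
    			/* payload: node ptr + cookie */
    			ptr += 2 * sizeof(binder_uintptr_t);
    			break;
    		case BR_TRANSACTION:
    		case BR_REPLY:
    			ptr += sizeof(struct binder_transaction_data);
    			break;
    		default:
    			return;		/* not handled in this sketch */
    		}
    	}
    }
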
-static void binder_release_work(struct list_head *list)
+static void binder_release_work(struct binder_proc *proc,
+				struct list_head *list)
 {
 	struct binder_work *w;

-	while (!list_empty(list)) {
-		w = list_first_entry(list, struct binder_work, entry);
-		list_del_init(&w->entry);
+	while (1) {
+		w = binder_dequeue_work_head(proc, list);
+		if (!w)
+			return;
+
 		switch (w->type) {
 		case BINDER_WORK_TRANSACTION: {
 			struct binder_transaction *t;
@@ -2996,11 +4064,17 @@ static void binder_release_work(struct list_head *list)
 				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
 					"undelivered transaction %d\n",
 					t->debug_id);
-				t->buffer->transaction = NULL;
-				kfree(t);
-				binder_stats_deleted(BINDER_STAT_TRANSACTION);
+				binder_free_transaction(t);
 			}
 		} break;
+		case BINDER_WORK_RETURN_ERROR: {
+			struct binder_error *e = container_of(
+					w, struct binder_error, work);
+
+			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+				"undelivered TRANSACTION_ERROR: %u\n",
+				e->cmd);
+		} break;
 		case BINDER_WORK_TRANSACTION_COMPLETE: {
 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
 				"undelivered TRANSACTION_COMPLETE\n");
@@ -3027,7 +4101,8 @@ static void binder_release_work(struct list_head *list)

 }

-static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+static struct binder_thread *binder_get_thread_ilocked(
+		struct binder_proc *proc, struct binder_thread *new_thread)
 {
 	struct binder_thread *thread = NULL;
 	struct rb_node *parent = NULL;
@@ -3042,38 +4117,99 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
 		else if (current->pid > thread->pid)
 			p = &(*p)->rb_right;
 		else
-			break;
+			return thread;
 	}
-	if (*p == NULL) {
-		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
-		if (thread == NULL)
+	if (!new_thread)
+		return NULL;
+	thread = new_thread;
+	binder_stats_created(BINDER_STAT_THREAD);
+	thread->proc = proc;
+	thread->pid = current->pid;
+	atomic_set(&thread->tmp_ref, 0);
+	init_waitqueue_head(&thread->wait);
+	INIT_LIST_HEAD(&thread->todo);
+	rb_link_node(&thread->rb_node, parent, p);
+	rb_insert_color(&thread->rb_node, &proc->threads);
+	thread->looper_need_return = true;
+	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
+	thread->return_error.cmd = BR_OK;
+	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
+	thread->reply_error.cmd = BR_OK;
+	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
+	return thread;
+}
+
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+	struct binder_thread *thread;
+	struct binder_thread *new_thread;
+
+	binder_inner_proc_lock(proc);
+	thread = binder_get_thread_ilocked(proc, NULL);
+	binder_inner_proc_unlock(proc);
+	if (!thread) {
+		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+		if (new_thread == NULL)
 			return NULL;
-		binder_stats_created(BINDER_STAT_THREAD);
-		thread->proc = proc;
-		thread->pid = current->pid;
-		init_waitqueue_head(&thread->wait);
-		INIT_LIST_HEAD(&thread->todo);
-		rb_link_node(&thread->rb_node, parent, p);
-		rb_insert_color(&thread->rb_node, &proc->threads);
-		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
-		thread->return_error = BR_OK;
-		thread->return_error2 = BR_OK;
+		binder_inner_proc_lock(proc);
+		thread = binder_get_thread_ilocked(proc, new_thread);
+		binder_inner_proc_unlock(proc);
+		if (thread != new_thread)
+			kfree(new_thread);
 	}
 	return thread;
 }

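binder_get_thread() now follows the usual sleeping-allocation-under-spinlock idiom: look up under the inner lock, and on a miss drop the lock, kzalloc(GFP_KERNEL) (which may sleep, while spinlock sections may not), then retake the lock and re-run the lookup so a racing creator wins cleanly and the loser's allocation is freed. A generic sketch of that shape, with invented names:

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct item {
    	int key;
    	struct item *next;
    };

    static struct item *items;	/* simple list, protected by table_lock */
    static DEFINE_SPINLOCK(table_lock);

    static struct item *item_find_locked(int key)
    {
    	struct item *it;

    	for (it = items; it; it = it->next)
    		if (it->key == key)
    			return it;
    	return NULL;
    }

    static struct item *item_get(int key)
    {
    	struct item *it, *new_item;

    	spin_lock(&table_lock);
    	it = item_find_locked(key);
    	spin_unlock(&table_lock);
    	if (it)
    		return it;

    	/* May sleep, so allocate with the spinlock dropped. */
    	new_item = kzalloc(sizeof(*new_item), GFP_KERNEL);
    	if (!new_item)
    		return NULL;

    	spin_lock(&table_lock);
    	it = item_find_locked(key);	/* re-check: did we race? */
    	if (!it) {
    		new_item->key = key;
    		new_item->next = items;
    		items = new_item;
    		it = new_item;
    		new_item = NULL;
    	}
    	spin_unlock(&table_lock);
    	kfree(new_item);		/* kfree(NULL) is a no-op */
    	return it;
    }
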
-static int binder_free_thread(struct binder_proc *proc,
-			      struct binder_thread *thread)
+static void binder_free_proc(struct binder_proc *proc)
+{
+	BUG_ON(!list_empty(&proc->todo));
+	BUG_ON(!list_empty(&proc->delivered_death));
+	binder_alloc_deferred_release(&proc->alloc);
+	put_task_struct(proc->tsk);
+	binder_stats_deleted(BINDER_STAT_PROC);
+	kfree(proc);
+}
+
+static void binder_free_thread(struct binder_thread *thread)
+{
+	BUG_ON(!list_empty(&thread->todo));
+	binder_stats_deleted(BINDER_STAT_THREAD);
+	binder_proc_dec_tmpref(thread->proc);
+	kfree(thread);
+}
+
+static int binder_thread_release(struct binder_proc *proc,
+				 struct binder_thread *thread)
 {
 	struct binder_transaction *t;
 	struct binder_transaction *send_reply = NULL;
 	int active_transactions = 0;
+	struct binder_transaction *last_t = NULL;

+	binder_inner_proc_lock(thread->proc);
+	/*
+	 * take a ref on the proc so it survives
+	 * after we remove this thread from proc->threads.
+	 * The corresponding dec is when we actually
+	 * free the thread in binder_free_thread()
+	 */
+	proc->tmp_ref++;
+	/*
+	 * take a ref on this thread to ensure it
+	 * survives while we are releasing it
+	 */
+	atomic_inc(&thread->tmp_ref);
 	rb_erase(&thread->rb_node, &proc->threads);
 	t = thread->transaction_stack;
-	if (t && t->to_thread == thread)
-		send_reply = t;
+	if (t) {
+		spin_lock(&t->lock);
+		if (t->to_thread == thread)
+			send_reply = t;
+	}
+	thread->is_dead = true;
+
 	while (t) {
+		last_t = t;
 		active_transactions++;
 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
 			     "release %d:%d transaction %d %s, still active\n",
@@ -3094,12 +4230,16 @@ static int binder_free_thread(struct binder_proc *proc,
 			t = t->from_parent;
 		} else
 			BUG();
+		spin_unlock(&last_t->lock);
+		if (t)
+			spin_lock(&t->lock);
 	}
+	binder_inner_proc_unlock(thread->proc);
+
 	if (send_reply)
 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
-	binder_release_work(&thread->todo);
-	kfree(thread);
-	binder_stats_deleted(BINDER_STAT_THREAD);
+	binder_release_work(proc, &thread->todo);
+	binder_thread_dec_tmpref(thread);
 	return active_transactions;
 }

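The tmp_ref manipulation above is binder's replacement for "the global lock keeps it alive": any path that must drop the locks while still touching a thread or proc first pins it with a temporary reference, and the final put frees the object only once it has been marked dead. A small sketch of that lifetime rule on a hypothetical object (names invented for illustration; binder's real helpers also coordinate with the proc's inner lock):

    #include <linux/atomic.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct obj {
    	spinlock_t lock;
    	atomic_t tmp_ref;
    	bool is_dead;
    };

    /* Caller holds obj->lock: pin the object before dropping the lock. */
    static void obj_get_tmpref_locked(struct obj *o)
    {
    	atomic_inc(&o->tmp_ref);
    }

    static void obj_put_tmpref(struct obj *o)
    {
    	bool free_it;

    	spin_lock(&o->lock);
    	/*
    	 * A live object is still owned by its containing structures;
    	 * only a dead object whose last temporary reference drops may
    	 * actually be freed.
    	 */
    	free_it = atomic_dec_and_test(&o->tmp_ref) && o->is_dead;
    	spin_unlock(&o->lock);
    	if (free_it)
    		kfree(o);
    }
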
@@ -3108,30 +4248,24 @@ static unsigned int binder_poll(struct file *filp,
 {
 	struct binder_proc *proc = filp->private_data;
 	struct binder_thread *thread = NULL;
-	int wait_for_proc_work;
-
-	binder_lock(__func__);
+	bool wait_for_proc_work;

 	thread = binder_get_thread(proc);

-	wait_for_proc_work = thread->transaction_stack == NULL &&
-		list_empty(&thread->todo) && thread->return_error == BR_OK;
+	binder_inner_proc_lock(thread->proc);
+	thread->looper |= BINDER_LOOPER_STATE_POLL;
+	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

-	binder_unlock(__func__);
+	binder_inner_proc_unlock(thread->proc);
+
+	if (binder_has_work(thread, wait_for_proc_work))
+		return POLLIN;
+
+	poll_wait(filp, &thread->wait, wait);
+
+	if (binder_has_thread_work(thread))
+		return POLLIN;

-	if (wait_for_proc_work) {
-		if (binder_has_proc_work(proc, thread))
-			return POLLIN;
-		poll_wait(filp, &proc->wait, wait);
-		if (binder_has_proc_work(proc, thread))
-			return POLLIN;
-	} else {
-		if (binder_has_thread_work(thread))
-			return POLLIN;
-		poll_wait(filp, &thread->wait, wait);
-		if (binder_has_thread_work(thread))
-			return POLLIN;
-	}
 	return 0;
 }

@@ -3178,8 +4312,10 @@ static int binder_ioctl_write_read(struct file *filp,
 					 &bwr.read_consumed,
 					 filp->f_flags & O_NONBLOCK);
 		trace_binder_read_done(ret);
-		if (!list_empty(&proc->todo))
-			wake_up_interruptible(&proc->wait);
+		binder_inner_proc_lock(proc);
+		if (!binder_worklist_empty_ilocked(&proc->todo))
+			binder_wakeup_proc_ilocked(proc);
+		binder_inner_proc_unlock(proc);
 		if (ret < 0) {
 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
 				ret = -EFAULT;
@@ -3204,9 +4340,10 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
 	int ret = 0;
 	struct binder_proc *proc = filp->private_data;
 	struct binder_context *context = proc->context;
-
+	struct binder_node *new_node;
 	kuid_t curr_euid = current_euid();

+	mutex_lock(&context->context_mgr_node_lock);
 	if (context->binder_context_mgr_node) {
 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
 		ret = -EBUSY;
@@ -3227,19 +4364,49 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
 	} else {
 		context->binder_context_mgr_uid = curr_euid;
 	}
-	context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
-	if (!context->binder_context_mgr_node) {
+	new_node = binder_new_node(proc, NULL);
+	if (!new_node) {
 		ret = -ENOMEM;
 		goto out;
 	}
-	context->binder_context_mgr_node->local_weak_refs++;
-	context->binder_context_mgr_node->local_strong_refs++;
-	context->binder_context_mgr_node->has_strong_ref = 1;
-	context->binder_context_mgr_node->has_weak_ref = 1;
+	binder_node_lock(new_node);
+	new_node->local_weak_refs++;
+	new_node->local_strong_refs++;
+	new_node->has_strong_ref = 1;
+	new_node->has_weak_ref = 1;
+	context->binder_context_mgr_node = new_node;
+	binder_node_unlock(new_node);
+	binder_put_node(new_node);
 out:
+	mutex_unlock(&context->context_mgr_node_lock);
 	return ret;
 }

+static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
+				struct binder_node_debug_info *info)
+{
+	struct rb_node *n;
+	binder_uintptr_t ptr = info->ptr;
+
+	memset(info, 0, sizeof(*info));
+
+	binder_inner_proc_lock(proc);
+	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+		struct binder_node *node = rb_entry(n, struct binder_node,
+						    rb_node);
+		if (node->ptr > ptr) {
+			info->ptr = node->ptr;
+			info->cookie = node->cookie;
+			info->has_strong_ref = node->has_strong_ref;
+			info->has_weak_ref = node->has_weak_ref;
+			break;
+		}
+	}
+	binder_inner_proc_unlock(proc);
+
+	return 0;
+}
+
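binder_ioctl_get_node_debug_info() returns the first node whose ptr strictly exceeds the one passed in, and an all-zero record once the tree is exhausted, so user space can enumerate a process's nodes by feeding each result back as the next key. A hedged user-space sketch (assumes an open /dev/binder descriptor and the uapi header):

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/android/binder.h>

    /* List the calling process's binder nodes, lowest ptr first. */
    static void dump_nodes(int binder_fd)
    {
    	struct binder_node_debug_info info;

    	memset(&info, 0, sizeof(info));
    	do {
    		if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
    			break;
    		if (info.ptr)	/* an all-zero record means "done" */
    			printf("node u%016llx c%016llx hs %u hw %u\n",
    			       (unsigned long long)info.ptr,
    			       (unsigned long long)info.cookie,
    			       info.has_strong_ref, info.has_weak_ref);
    	} while (info.ptr);
    }
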
 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	int ret;
@@ -3251,13 +4418,14 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
 			proc->pid, current->pid, cmd, arg);*/
 			proc->pid, current->pid, cmd, arg);*/
 
 
+	binder_selftest_alloc(&proc->alloc);
+
 	trace_binder_ioctl(cmd, arg);
 	trace_binder_ioctl(cmd, arg);
 
 
 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
 	if (ret)
 	if (ret)
 		goto err_unlocked;
 		goto err_unlocked;
 
 
-	binder_lock(__func__);
 	thread = binder_get_thread(proc);
 	thread = binder_get_thread(proc);
 	if (thread == NULL) {
 	if (thread == NULL) {
 		ret = -ENOMEM;
 		ret = -ENOMEM;
@@ -3270,12 +4438,19 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		if (ret)
 			goto err;
 		break;
-	case BINDER_SET_MAX_THREADS:
-		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+	case BINDER_SET_MAX_THREADS: {
+		int max_threads;
+
+		if (copy_from_user(&max_threads, ubuf,
+				   sizeof(max_threads))) {
 			ret = -EINVAL;
 			goto err;
 		}
+		binder_inner_proc_lock(proc);
+		proc->max_threads = max_threads;
+		binder_inner_proc_unlock(proc);
 		break;
+	}
 	case BINDER_SET_CONTEXT_MGR:
 		ret = binder_ioctl_set_ctx_mgr(filp);
 		if (ret)
@@ -3284,7 +4459,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	case BINDER_THREAD_EXIT:
 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
 			     proc->pid, thread->pid);
-		binder_free_thread(proc, thread);
+		binder_thread_release(proc, thread);
 		thread = NULL;
 		break;
 	case BINDER_VERSION: {
@@ -3301,6 +4476,24 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		}
 		break;
 	}
+	case BINDER_GET_NODE_DEBUG_INFO: {
+		struct binder_node_debug_info info;
+
+		if (copy_from_user(&info, ubuf, sizeof(info))) {
+			ret = -EFAULT;
+			goto err;
+		}
+
+		ret = binder_ioctl_get_node_debug_info(proc, &info);
+		if (ret < 0)
+			goto err;
+
+		if (copy_to_user(ubuf, &info, sizeof(info))) {
+			ret = -EFAULT;
+			goto err;
+		}
+		break;
+	}
 	default:
 		ret = -EINVAL;
 		goto err;
@@ -3308,8 +4501,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	ret = 0;
 err:
 	if (thread)
-		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
-	binder_unlock(__func__);
+		thread->looper_need_return = false;
 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
 	if (ret && ret != -ERESTARTSYS)
 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
@@ -3338,8 +4530,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
 		     proc->pid, vma->vm_start, vma->vm_end,
 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
 		     (unsigned long)pgprot_val(vma->vm_page_prot));
-	proc->vma = NULL;
-	proc->vma_vm_mm = NULL;
+	binder_alloc_vma_close(&proc->alloc);
 	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
 }

@@ -3357,10 +4548,8 @@ static const struct vm_operations_struct binder_vm_ops = {
 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	int ret;
-	struct vm_struct *area;
 	struct binder_proc *proc = filp->private_data;
 	const char *failure_string;
-	struct binder_buffer *buffer;

 	if (proc->tsk != current->group_leader)
 		return -EINVAL;
@@ -3369,8 +4558,8 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 		vma->vm_end = vma->vm_start + SZ_4M;

 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
-		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
-		     proc->pid, vma->vm_start, vma->vm_end,
+		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+		     __func__, proc->pid, vma->vm_start, vma->vm_end,
 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
 		     (unsigned long)pgprot_val(vma->vm_page_prot));

@@ -3380,73 +4569,15 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 		goto err_bad_arg;
 	}
 	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
-
-	mutex_lock(&binder_mmap_lock);
-	if (proc->buffer) {
-		ret = -EBUSY;
-		failure_string = "already mapped";
-		goto err_already_mapped;
-	}
-
-	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
-	if (area == NULL) {
-		ret = -ENOMEM;
-		failure_string = "get_vm_area";
-		goto err_get_vm_area_failed;
-	}
-	proc->buffer = area->addr;
-	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
-	mutex_unlock(&binder_mmap_lock);
-
-#ifdef CONFIG_CPU_CACHE_VIPT
-	if (cache_is_vipt_aliasing()) {
-		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
-			pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
-			vma->vm_start += PAGE_SIZE;
-		}
-	}
-#endif
-	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
-	if (proc->pages == NULL) {
-		ret = -ENOMEM;
-		failure_string = "alloc page array";
-		goto err_alloc_pages_failed;
-	}
-	proc->buffer_size = vma->vm_end - vma->vm_start;
-
 	vma->vm_ops = &binder_vm_ops;
 	vma->vm_ops = &binder_vm_ops;
 	vma->vm_private_data = proc;

-		ret = -ENOMEM;
-		failure_string = "alloc small buf";
-		goto err_alloc_small_buf_failed;
-	}
-	buffer = proc->buffer;
-	INIT_LIST_HEAD(&proc->buffers);
-	list_add(&buffer->entry, &proc->buffers);
-	buffer->free = 1;
-	binder_insert_free_buffer(proc, buffer);
-	proc->free_async_space = proc->buffer_size / 2;
-	barrier();
+	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
+	if (ret)
+		return ret;
 	proc->files = get_files_struct(current);
-	proc->vma = vma;
-	proc->vma_vm_mm = vma->vm_mm;
-
-	/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
-		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
 	return 0;

-err_alloc_small_buf_failed:
-	kfree(proc->pages);
-	proc->pages = NULL;
-err_alloc_pages_failed:
-	mutex_lock(&binder_mmap_lock);
-	vfree(proc->buffer);
-	proc->buffer = NULL;
-err_get_vm_area_failed:
-err_already_mapped:
-	mutex_unlock(&binder_mmap_lock);
 err_bad_arg:
 	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
@@ -3464,24 +4595,26 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
 	if (proc == NULL)
 		return -ENOMEM;
+	spin_lock_init(&proc->inner_lock);
+	spin_lock_init(&proc->outer_lock);
 	get_task_struct(current->group_leader);
 	proc->tsk = current->group_leader;
 	INIT_LIST_HEAD(&proc->todo);
-	init_waitqueue_head(&proc->wait);
 	proc->default_priority = task_nice(current);
 	binder_dev = container_of(filp->private_data, struct binder_device,
 				  miscdev);
 	proc->context = &binder_dev->context;
-
-	binder_lock(__func__);
+	binder_alloc_init(&proc->alloc);

 	binder_stats_created(BINDER_STAT_PROC);
-	hlist_add_head(&proc->proc_node, &binder_procs);
 	proc->pid = current->group_leader->pid;
 	INIT_LIST_HEAD(&proc->delivered_death);
+	INIT_LIST_HEAD(&proc->waiting_threads);
 	filp->private_data = proc;

-	binder_unlock(__func__);
+	mutex_lock(&binder_procs_lock);
+	hlist_add_head(&proc->proc_node, &binder_procs);
+	mutex_unlock(&binder_procs_lock);

 	if (binder_debugfs_dir_entry_proc) {
 		char strbuf[11];
@@ -3517,16 +4650,17 @@ static void binder_deferred_flush(struct binder_proc *proc)
 	struct rb_node *n;
 	int wake_count = 0;

+	binder_inner_proc_lock(proc);
 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

-		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+		thread->looper_need_return = true;
 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
 			wake_up_interruptible(&thread->wait);
 			wake_count++;
 		}
 	}
-	wake_up_interruptible_all(&proc->wait);
+	binder_inner_proc_unlock(proc);

 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
 		     "binder_flush: %d woke %d threads\n", proc->pid,
@@ -3547,13 +4681,21 @@ static int binder_node_release(struct binder_node *node, int refs)
 {
 	struct binder_ref *ref;
 	int death = 0;
+	struct binder_proc *proc = node->proc;

-	list_del_init(&node->work.entry);
-	binder_release_work(&node->async_todo);
+	binder_release_work(proc, &node->async_todo);

-	if (hlist_empty(&node->refs)) {
-		kfree(node);
-		binder_stats_deleted(BINDER_STAT_NODE);
+	binder_node_lock(node);
+	binder_inner_proc_lock(proc);
+	binder_dequeue_work_ilocked(&node->work);
+	/*
+	 * The caller must have taken a temporary ref on the node,
+	 */
+	BUG_ON(!node->tmp_refs);
+	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
+		binder_inner_proc_unlock(proc);
+		binder_node_unlock(node);
+		binder_free_node(node);

 		return refs;
 	}
@@ -3561,45 +4703,58 @@ static int binder_node_release(struct binder_node *node, int refs)
 	node->proc = NULL;
 	node->local_strong_refs = 0;
 	node->local_weak_refs = 0;
+	binder_inner_proc_unlock(proc);
+
+	spin_lock(&binder_dead_nodes_lock);
 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
+	spin_unlock(&binder_dead_nodes_lock);

 	hlist_for_each_entry(ref, &node->refs, node_entry) {
 		refs++;
-
-		if (!ref->death)
+		/*
+		 * Need the node lock to synchronize
+		 * with new notification requests and the
+		 * inner lock to synchronize with queued
+		 * death notifications.
+		 */
+		binder_inner_proc_lock(ref->proc);
+		if (!ref->death) {
+			binder_inner_proc_unlock(ref->proc);
 			continue;
+		}

 		death++;

-		if (list_empty(&ref->death->work.entry)) {
-			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
-			list_add_tail(&ref->death->work.entry,
-				      &ref->proc->todo);
-			wake_up_interruptible(&ref->proc->wait);
-		} else
-			BUG();
+		BUG_ON(!list_empty(&ref->death->work.entry));
+		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+		binder_enqueue_work_ilocked(&ref->death->work,
+					    &ref->proc->todo);
+		binder_wakeup_proc_ilocked(ref->proc);
+		binder_inner_proc_unlock(ref->proc);
 	}

 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
 		     "node %d now dead, refs %d, death %d\n",
 		     node->debug_id, refs, death);
+	binder_node_unlock(node);
+	binder_put_node(node);

 	return refs;
 }

 static void binder_deferred_release(struct binder_proc *proc)
 {
-	struct binder_transaction *t;
 	struct binder_context *context = proc->context;
 	struct rb_node *n;
-	int threads, nodes, incoming_refs, outgoing_refs, buffers,
-		active_transactions, page_count;
+	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

-	BUG_ON(proc->vma);
 	BUG_ON(proc->files);

+	mutex_lock(&binder_procs_lock);
 	hlist_del(&proc->proc_node);
+	mutex_unlock(&binder_procs_lock);

+	mutex_lock(&context->context_mgr_node_lock);
 	if (context->binder_context_mgr_node &&
 	    context->binder_context_mgr_node->proc == proc) {
 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
@@ -3607,15 +4762,25 @@ static void binder_deferred_release(struct binder_proc *proc)
 			     __func__, proc->pid);
 		context->binder_context_mgr_node = NULL;
 	}
+	mutex_unlock(&context->context_mgr_node_lock);
+	binder_inner_proc_lock(proc);
+	/*
+	 * Make sure proc stays alive after we
+	 * remove all the threads
+	 */
+	proc->tmp_ref++;

+	proc->is_dead = true;
 	threads = 0;
 	active_transactions = 0;
 	while ((n = rb_first(&proc->threads))) {
 		struct binder_thread *thread;

 		thread = rb_entry(n, struct binder_thread, rb_node);
+		binder_inner_proc_unlock(proc);
 		threads++;
-		active_transactions += binder_free_thread(proc, thread);
+		active_transactions += binder_thread_release(proc, thread);
+		binder_inner_proc_lock(proc);
 	}

 	nodes = 0;
@@ -3625,73 +4790,42 @@ static void binder_deferred_release(struct binder_proc *proc)

 		node = rb_entry(n, struct binder_node, rb_node);
 		nodes++;
+		/*
+		 * take a temporary ref on the node before
+		 * calling binder_node_release() which will either
+		 * kfree() the node or call binder_put_node()
+		 */
+		binder_inc_node_tmpref_ilocked(node);
 		rb_erase(&node->rb_node, &proc->nodes);
+		binder_inner_proc_unlock(proc);
 		incoming_refs = binder_node_release(node, incoming_refs);
+		binder_inner_proc_lock(proc);
 	}
+	binder_inner_proc_unlock(proc);

 	outgoing_refs = 0;
+	binder_proc_lock(proc);
 	while ((n = rb_first(&proc->refs_by_desc))) {
 		struct binder_ref *ref;

 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
 		outgoing_refs++;
-		binder_delete_ref(ref);
-	}
-
-	binder_release_work(&proc->todo);
-	binder_release_work(&proc->delivered_death);
-
-	buffers = 0;
-	while ((n = rb_first(&proc->allocated_buffers))) {
-		struct binder_buffer *buffer;
-
-		buffer = rb_entry(n, struct binder_buffer, rb_node);
-
-		t = buffer->transaction;
-		if (t) {
-			t->buffer = NULL;
-			buffer->transaction = NULL;
-			pr_err("release proc %d, transaction %d, not freed\n",
-			       proc->pid, t->debug_id);
-			/*BUG();*/
-		}
-
-		binder_free_buf(proc, buffer);
-		buffers++;
+		binder_cleanup_ref_olocked(ref);
+		binder_proc_unlock(proc);
+		binder_free_ref(ref);
+		binder_proc_lock(proc);
 	}
+	binder_proc_unlock(proc);

-	binder_stats_deleted(BINDER_STAT_PROC);
-
-	page_count = 0;
-	if (proc->pages) {
-		int i;
-
-		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
-			void *page_addr;
-
-			if (!proc->pages[i])
-				continue;
-
-			page_addr = proc->buffer + i * PAGE_SIZE;
-			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				     "%s: %d: page %d at %p not freed\n",
-				     __func__, proc->pid, i, page_addr);
-			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-			__free_page(proc->pages[i]);
-			page_count++;
-		}
-		kfree(proc->pages);
-		vfree(proc->buffer);
-	}
-
-	put_task_struct(proc->tsk);
+	binder_release_work(proc, &proc->todo);
+	binder_release_work(proc, &proc->delivered_death);

 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
-		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
+		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
 		     __func__, proc->pid, threads, nodes, incoming_refs,
-		     outgoing_refs, active_transactions, buffers, page_count);
+		     outgoing_refs, active_transactions);

-	kfree(proc);
+	binder_proc_dec_tmpref(proc);
 }

 static void binder_deferred_func(struct work_struct *work)
@@ -3702,7 +4836,6 @@ static void binder_deferred_func(struct work_struct *work)
 	int defer;

 	do {
-		binder_lock(__func__);
 		mutex_lock(&binder_deferred_lock);
 		if (!hlist_empty(&binder_deferred_list)) {
 			proc = hlist_entry(binder_deferred_list.first,
@@ -3729,7 +4862,6 @@ static void binder_deferred_func(struct work_struct *work)
 		if (defer & BINDER_DEFERRED_RELEASE)
 			binder_deferred_release(proc); /* frees proc */

-		binder_unlock(__func__);
 		if (files)
 			put_files_struct(files);
 	} while (proc);
@@ -3749,41 +4881,51 @@ binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
 	mutex_unlock(&binder_deferred_lock);
 }

-static void print_binder_transaction(struct seq_file *m, const char *prefix,
-				     struct binder_transaction *t)
+static void print_binder_transaction_ilocked(struct seq_file *m,
+					     struct binder_proc *proc,
+					     const char *prefix,
+					     struct binder_transaction *t)
 {
+	struct binder_proc *to_proc;
+	struct binder_buffer *buffer = t->buffer;
+
+	spin_lock(&t->lock);
+	to_proc = t->to_proc;
 	seq_printf(m,
 		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
 		   prefix, t->debug_id, t,
 		   t->from ? t->from->proc->pid : 0,
 		   t->from ? t->from->pid : 0,
-		   t->to_proc ? t->to_proc->pid : 0,
+		   to_proc ? to_proc->pid : 0,
 		   t->to_thread ? t->to_thread->pid : 0,
 		   t->code, t->flags, t->priority, t->need_reply);
-	if (t->buffer == NULL) {
+	spin_unlock(&t->lock);
+
+	if (proc != to_proc) {
+		/*
+		 * Can only safely deref buffer if we are holding the
+		 * correct proc inner lock for this node
+		 */
+		seq_puts(m, "\n");
+		return;
+	}
+
+	if (buffer == NULL) {
 		seq_puts(m, " buffer free\n");
 		return;
 	}
-	if (t->buffer->target_node)
-		seq_printf(m, " node %d",
-			   t->buffer->target_node->debug_id);
+	if (buffer->target_node)
+		seq_printf(m, " node %d", buffer->target_node->debug_id);
 	seq_printf(m, " size %zd:%zd data %p\n",
-		   t->buffer->data_size, t->buffer->offsets_size,
-		   t->buffer->data);
-}
-
-static void print_binder_buffer(struct seq_file *m, const char *prefix,
-				struct binder_buffer *buffer)
-{
-	seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
-		   prefix, buffer->debug_id, buffer->data,
 		   buffer->data_size, buffer->offsets_size,
-		   buffer->transaction ? "active" : "delivered");
+		   buffer->data);
 }

-static void print_binder_work(struct seq_file *m, const char *prefix,
-			      const char *transaction_prefix,
-			      struct binder_work *w)
+static void print_binder_work_ilocked(struct seq_file *m,
+				     struct binder_proc *proc,
+				     const char *prefix,
+				     const char *transaction_prefix,
+				     struct binder_work *w)
 {
 	struct binder_node *node;
 	struct binder_transaction *t;
@@ -3791,8 +4933,16 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
 	switch (w->type) {
 	case BINDER_WORK_TRANSACTION:
 		t = container_of(w, struct binder_transaction, work);
-		print_binder_transaction(m, transaction_prefix, t);
+		print_binder_transaction_ilocked(
+				m, proc, transaction_prefix, t);
 		break;
+	case BINDER_WORK_RETURN_ERROR: {
+		struct binder_error *e = container_of(
+				w, struct binder_error, work);
+
+		seq_printf(m, "%stransaction error: %u\n",
+			   prefix, e->cmd);
+	} break;
 	case BINDER_WORK_TRANSACTION_COMPLETE:
 		seq_printf(m, "%stransaction complete\n", prefix);
 		break;
@@ -3817,40 +4967,46 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
 	}
 }

-static void print_binder_thread(struct seq_file *m,
-				struct binder_thread *thread,
-				int print_always)
+static void print_binder_thread_ilocked(struct seq_file *m,
+					struct binder_thread *thread,
+					int print_always)
 {
 	struct binder_transaction *t;
 	struct binder_work *w;
 	size_t start_pos = m->count;
 	size_t header_pos;

-	seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
+	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
+			thread->pid, thread->looper,
+			thread->looper_need_return,
+			atomic_read(&thread->tmp_ref));
 	header_pos = m->count;
 	t = thread->transaction_stack;
 	while (t) {
 		if (t->from == thread) {
-			print_binder_transaction(m,
-						 "    outgoing transaction", t);
+			print_binder_transaction_ilocked(m, thread->proc,
+					"    outgoing transaction", t);
 			t = t->from_parent;
 		} else if (t->to_thread == thread) {
-			print_binder_transaction(m,
+			print_binder_transaction_ilocked(m, thread->proc,
 						 "    incoming transaction", t);
 			t = t->to_parent;
 		} else {
-			print_binder_transaction(m, "    bad transaction", t);
+			print_binder_transaction_ilocked(m, thread->proc,
+					"    bad transaction", t);
 			t = NULL;
 		}
 	}
 	list_for_each_entry(w, &thread->todo, entry) {
-		print_binder_work(m, "    ", "    pending transaction", w);
+		print_binder_work_ilocked(m, thread->proc, "    ",
+					  "    pending transaction", w);
 	}
 	if (!print_always && m->count == header_pos)
 		m->count = start_pos;
 }

-static void print_binder_node(struct seq_file *m, struct binder_node *node)
+static void print_binder_node_nilocked(struct seq_file *m,
+				       struct binder_node *node)
 {
 	struct binder_ref *ref;
 	struct binder_work *w;
@@ -3860,27 +5016,34 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
 	hlist_for_each_entry(ref, &node->refs, node_entry)
 		count++;

-	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
+	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
 		   node->has_strong_ref, node->has_weak_ref,
 		   node->local_strong_refs, node->local_weak_refs,
-		   node->internal_strong_refs, count);
+		   node->internal_strong_refs, count, node->tmp_refs);
 	if (count) {
 		seq_puts(m, " proc");
 		hlist_for_each_entry(ref, &node->refs, node_entry)
 			seq_printf(m, " %d", ref->proc->pid);
 	}
 	seq_puts(m, "\n");
-	list_for_each_entry(w, &node->async_todo, entry)
-		print_binder_work(m, "    ",
-				  "    pending async transaction", w);
+	if (node->proc) {
+		list_for_each_entry(w, &node->async_todo, entry)
+			print_binder_work_ilocked(m, node->proc, "    ",
+					  "    pending async transaction", w);
+	}
 }

-static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+static void print_binder_ref_olocked(struct seq_file *m,
+				     struct binder_ref *ref)
 {
-	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
-		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
-		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
+	binder_node_lock(ref->node);
+	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
+		   ref->data.debug_id, ref->data.desc,
+		   ref->node->proc ? "" : "dead ",
+		   ref->node->debug_id, ref->data.strong,
+		   ref->data.weak, ref->death);
+	binder_node_unlock(ref->node);
 }

 static void print_binder_proc(struct seq_file *m,
 	struct rb_node *n;
 	struct rb_node *n;
 	size_t start_pos = m->count;
 	size_t start_pos = m->count;
 	size_t header_pos;
 	size_t header_pos;
+	struct binder_node *last_node = NULL;
 
 
 	seq_printf(m, "proc %d\n", proc->pid);
 	seq_printf(m, "proc %d\n", proc->pid);
 	seq_printf(m, "context %s\n", proc->context->name);
 	seq_printf(m, "context %s\n", proc->context->name);
 	header_pos = m->count;
 	header_pos = m->count;
 
 
+	binder_inner_proc_lock(proc);
 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
-		print_binder_thread(m, rb_entry(n, struct binder_thread,
+		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
 						rb_node), print_all);
 						rb_node), print_all);
+
 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
 		struct binder_node *node = rb_entry(n, struct binder_node,
 		struct binder_node *node = rb_entry(n, struct binder_node,
 						    rb_node);
 						    rb_node);
-		if (print_all || node->has_async_transaction)
-			print_binder_node(m, node);
-	}
+		/*
+		 * take a temporary reference on the node so it
+		 * survives and isn't removed from the tree
+		 * while we print it.
+		 */
+		binder_inc_node_tmpref_ilocked(node);
+		/* Need to drop inner lock to take node lock */
+		binder_inner_proc_unlock(proc);
+		if (last_node)
+			binder_put_node(last_node);
+		binder_node_inner_lock(node);
+		print_binder_node_nilocked(m, node);
+		binder_node_inner_unlock(node);
+		last_node = node;
+		binder_inner_proc_lock(proc);
+	}
+	binder_inner_proc_unlock(proc);
+	if (last_node)
+		binder_put_node(last_node);
+
 	if (print_all) {
+		binder_proc_lock(proc);
 		for (n = rb_first(&proc->refs_by_desc);
 		     n != NULL;
 		     n = rb_next(n))
-			print_binder_ref(m, rb_entry(n, struct binder_ref,
-						     rb_node_desc));
+			print_binder_ref_olocked(m, rb_entry(n,
+							    struct binder_ref,
+							    rb_node_desc));
+		binder_proc_unlock(proc);
 	}
-	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
-		print_binder_buffer(m, "  buffer",
-				    rb_entry(n, struct binder_buffer, rb_node));
+	binder_alloc_print_allocated(m, &proc->alloc);
+	binder_inner_proc_lock(proc);
 	list_for_each_entry(w, &proc->todo, entry)
-		print_binder_work(m, "  ", "  pending transaction", w);
+		print_binder_work_ilocked(m, proc, "  ",
+					  "  pending transaction", w);
 	list_for_each_entry(w, &proc->delivered_death, entry) {
 		seq_puts(m, "  has delivered dead binder\n");
 		break;
 	}
+	binder_inner_proc_unlock(proc);
 	if (!print_all && m->count == header_pos)
 		m->count = start_pos;
 }
@@ -3985,17 +5172,21 @@ static void print_binder_stats(struct seq_file *m, const char *prefix,
 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
 		     ARRAY_SIZE(binder_command_strings));
 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
-		if (stats->bc[i])
+		int temp = atomic_read(&stats->bc[i]);
+
+		if (temp)
 			seq_printf(m, "%s%s: %d\n", prefix,
-				   binder_command_strings[i], stats->bc[i]);
+				   binder_command_strings[i], temp);
 	}

 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
 		     ARRAY_SIZE(binder_return_strings));
 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
-		if (stats->br[i])
+		int temp = atomic_read(&stats->br[i]);
+
+		if (temp)
 			seq_printf(m, "%s%s: %d\n", prefix,
-				   binder_return_strings[i], stats->br[i]);
+				   binder_return_strings[i], temp);
 	}

 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
@@ -4003,11 +5194,15 @@ static void print_binder_stats(struct seq_file *m, const char *prefix,
 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
 		     ARRAY_SIZE(stats->obj_deleted));
 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
-		if (stats->obj_created[i] || stats->obj_deleted[i])
-			seq_printf(m, "%s%s: active %d total %d\n", prefix,
+		int created = atomic_read(&stats->obj_created[i]);
+		int deleted = atomic_read(&stats->obj_deleted[i]);
+
+		if (created || deleted)
+			seq_printf(m, "%s%s: active %d total %d\n",
+				prefix,
 				binder_objstat_strings[i],
-				stats->obj_created[i] - stats->obj_deleted[i],
-				stats->obj_created[i]);
+				created - deleted,
+				created);
 	}
 }

@@ -4015,51 +5210,61 @@ static void print_binder_proc_stats(struct seq_file *m,
 				    struct binder_proc *proc)
 {
 	struct binder_work *w;
+	struct binder_thread *thread;
 	struct rb_node *n;
-	int count, strong, weak;
+	int count, strong, weak, ready_threads;
+	size_t free_async_space =
+		binder_alloc_get_free_async_space(&proc->alloc);

 	seq_printf(m, "proc %d\n", proc->pid);
 	seq_printf(m, "context %s\n", proc->context->name);
 	count = 0;
+	ready_threads = 0;
+	binder_inner_proc_lock(proc);
 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
 		count++;
+
+	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
+		ready_threads++;
+
 	seq_printf(m, "  threads: %d\n", count);
 	seq_printf(m, "  requested threads: %d+%d/%d\n"
 			"  ready threads %d\n"
 			"  free async space %zd\n", proc->requested_threads,
 			proc->requested_threads_started, proc->max_threads,
-			proc->ready_threads, proc->free_async_space);
+			ready_threads,
+			free_async_space);
 	count = 0;
 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
 		count++;
+	binder_inner_proc_unlock(proc);
 	seq_printf(m, "  nodes: %d\n", count);
 	count = 0;
 	strong = 0;
 	weak = 0;
+	binder_proc_lock(proc);
 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
 						  rb_node_desc);
 		count++;
-		strong += ref->strong;
-		weak += ref->weak;
+		strong += ref->data.strong;
+		weak += ref->data.weak;
 	}
+	binder_proc_unlock(proc);
 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

-	count = 0;
-	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
-		count++;
+	count = binder_alloc_get_allocated_count(&proc->alloc);
 	seq_printf(m, "  buffers: %d\n", count);

+	binder_alloc_print_pages(m, &proc->alloc);
+
 	count = 0;
+	binder_inner_proc_lock(proc);
 	list_for_each_entry(w, &proc->todo, entry) {
-		switch (w->type) {
-		case BINDER_WORK_TRANSACTION:
+		if (w->type == BINDER_WORK_TRANSACTION)
 			count++;
-			break;
-		default:
-			break;
-		}
 	}
+	binder_inner_proc_unlock(proc);
 	seq_printf(m, "  pending transactions: %d\n", count);

 	print_binder_stats(m, "  ", &proc->stats);
@@ -4070,57 +5275,67 @@ static int binder_state_show(struct seq_file *m, void *unused)
 {
 	struct binder_proc *proc;
 	struct binder_node *node;
-	int do_lock = !binder_debug_no_lock;
-
-	if (do_lock)
-		binder_lock(__func__);
+	struct binder_node *last_node = NULL;

 	seq_puts(m, "binder state:\n");

+	spin_lock(&binder_dead_nodes_lock);
 	if (!hlist_empty(&binder_dead_nodes))
 		seq_puts(m, "dead nodes:\n");
-	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
-		print_binder_node(m, node);
-
+	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
+		/*
+		 * take a temporary reference on the node so it
+		 * survives and isn't removed from the list
+		 * while we print it.
+		 */
+		node->tmp_refs++;
+		spin_unlock(&binder_dead_nodes_lock);
+		if (last_node)
+			binder_put_node(last_node);
+		binder_node_lock(node);
+		print_binder_node_nilocked(m, node);
+		binder_node_unlock(node);
+		last_node = node;
+		spin_lock(&binder_dead_nodes_lock);
+	}
+	spin_unlock(&binder_dead_nodes_lock);
+	if (last_node)
+		binder_put_node(last_node);
+
+	mutex_lock(&binder_procs_lock);
 	hlist_for_each_entry(proc, &binder_procs, proc_node)
 		print_binder_proc(m, proc, 1);
-	if (do_lock)
-		binder_unlock(__func__);
+	mutex_unlock(&binder_procs_lock);
+
 	return 0;
 }

 static int binder_stats_show(struct seq_file *m, void *unused)
 {
 	struct binder_proc *proc;
-	int do_lock = !binder_debug_no_lock;
-
-	if (do_lock)
-		binder_lock(__func__);

 	seq_puts(m, "binder stats:\n");

 	print_binder_stats(m, "", &binder_stats);

+	mutex_lock(&binder_procs_lock);
 	hlist_for_each_entry(proc, &binder_procs, proc_node)
 		print_binder_proc_stats(m, proc);
-	if (do_lock)
-		binder_unlock(__func__);
+	mutex_unlock(&binder_procs_lock);
+
 	return 0;
 }

 static int binder_transactions_show(struct seq_file *m, void *unused)
 {
 	struct binder_proc *proc;
-	int do_lock = !binder_debug_no_lock;
-
-	if (do_lock)
-		binder_lock(__func__);

 	seq_puts(m, "binder transactions:\n");
+	mutex_lock(&binder_procs_lock);
 	hlist_for_each_entry(proc, &binder_procs, proc_node)
 		print_binder_proc(m, proc, 0);
-	if (do_lock)
-		binder_unlock(__func__);
+	mutex_unlock(&binder_procs_lock);
+
 	return 0;
 }

@@ -4128,44 +5343,63 @@ static int binder_proc_show(struct seq_file *m, void *unused)
 {
 	struct binder_proc *itr;
 	int pid = (unsigned long)m->private;
-	int do_lock = !binder_debug_no_lock;
-
-	if (do_lock)
-		binder_lock(__func__);

+	mutex_lock(&binder_procs_lock);
 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
 		if (itr->pid == pid) {
 			seq_puts(m, "binder proc state:\n");
 			print_binder_proc(m, itr, 1);
 		}
 	}
-	if (do_lock)
-		binder_unlock(__func__);
+	mutex_unlock(&binder_procs_lock);
+
 	return 0;
 }

 static void print_binder_transaction_log_entry(struct seq_file *m,
 					struct binder_transaction_log_entry *e)
 {
+	int debug_id = READ_ONCE(e->debug_id_done);
+	/*
+	 * read barrier to guarantee debug_id_done read before
+	 * we print the log values
+	 */
+	smp_rmb();
 	seq_printf(m,
-		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
+		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
 		   e->debug_id, (e->call_type == 2) ? "reply" :
 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
-		   e->to_node, e->target_handle, e->data_size, e->offsets_size);
+		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
+		   e->return_error, e->return_error_param,
+		   e->return_error_line);
+	/*
+	 * read-barrier to guarantee read of debug_id_done after
+	 * done printing the fields of the entry
+	 */
+	smp_rmb();
+	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
+			"\n" : " (incomplete)\n");
 }

 static int binder_transaction_log_show(struct seq_file *m, void *unused)
 {
 	struct binder_transaction_log *log = m->private;
+	unsigned int log_cur = atomic_read(&log->cur);
+	unsigned int count;
+	unsigned int cur;
 	int i;

-	if (log->full) {
-		for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
-			print_binder_transaction_log_entry(m, &log->entry[i]);
+	count = log_cur + 1;
+	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
+		0 : count % ARRAY_SIZE(log->entry);
+	if (count > ARRAY_SIZE(log->entry) || log->full)
+		count = ARRAY_SIZE(log->entry);
+	for (i = 0; i < count; i++) {
+		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
+
+		print_binder_transaction_log_entry(m, &log->entry[index]);
 	}
-	for (i = 0; i < log->next; i++)
-		print_binder_transaction_log_entry(m, &log->entry[i]);
 	return 0;
 }

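+/*
+ * Illustrative sketch (not part of the commit; assumes the 32-entry
+ * log array binder uses): with atomic cur == 40, count = 41 exceeds the
+ * array size, so count is clamped to 32 and cur becomes 41 % 32 = 9;
+ * the loop then prints entries 9..40 (mod 32), i.e. the 32 most recent
+ * records oldest-first. Before the first wrap (cur == 5, log->full
+ * unset), cur starts at 0 and only entries 0..5 are printed.
+ */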
@@ -4200,6 +5434,7 @@ static int __init init_binder_device(const char *name)

 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
 	binder_device->context.name = name;
+	mutex_init(&binder_device->context.context_mgr_node_lock);

 	ret = misc_register(&binder_device->miscdev);
 	if (ret < 0) {
@@ -4215,10 +5450,15 @@ static int __init init_binder_device(const char *name)
 static int __init binder_init(void)
 {
 	int ret;
-	char *device_name, *device_names;
+	char *device_name, *device_names, *device_tmp;
 	struct binder_device *device;
 	struct hlist_node *tmp;

+	binder_alloc_shrinker_init();
+
+	atomic_set(&binder_transaction_log.cur, ~0U);
+	atomic_set(&binder_transaction_log_failed.cur, ~0U);
+
 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
 	if (binder_debugfs_dir_entry_root)
 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
@@ -4263,7 +5503,8 @@ static int __init binder_init(void)
 	}
 	strcpy(device_names, binder_devices_param);

-	while ((device_name = strsep(&device_names, ","))) {
+	device_tmp = device_names;
+	while ((device_name = strsep(&device_tmp, ","))) {
 		ret = init_binder_device(device_name);
 		if (ret)
 			goto err_init_binder_device_failed;
@@ -4277,6 +5518,9 @@ err_init_binder_device_failed:
 		hlist_del(&device->hlist);
 		kfree(device);
 	}
+
+	kfree(device_names);
+
 err_alloc_device_names_failed:
 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);


+ 1009 - 0
drivers/android/binder_alloc.c

@@ -0,0 +1,1009 @@
+/* binder_alloc.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/cacheflush.h>
+#include <linux/list.h>
+#include <linux/sched/mm.h>
+#include <linux/module.h>
+#include <linux/rtmutex.h>
+#include <linux/rbtree.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/list_lru.h>
+#include "binder_alloc.h"
+#include "binder_trace.h"
+
+struct list_lru binder_alloc_lru;
+
+static DEFINE_MUTEX(binder_alloc_mmap_lock);
+
+enum {
+	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
+	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
+	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
+};
+static uint32_t binder_alloc_debug_mask;
+
+module_param_named(debug_mask, binder_alloc_debug_mask,
+		   uint, 0644);
+
+#define binder_alloc_debug(mask, x...) \
+	do { \
+		if (binder_alloc_debug_mask & mask) \
+			pr_info(x); \
+	} while (0)
+
+static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
+{
+	return list_entry(buffer->entry.next, struct binder_buffer, entry);
+}
+
+static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
+{
+	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
+}
+
+static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+				       struct binder_buffer *buffer)
+{
+	if (list_is_last(&buffer->entry, &alloc->buffers))
+		return (u8 *)alloc->buffer +
+			alloc->buffer_size - (u8 *)buffer->data;
+	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
+}
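+/*
+ * Illustrative note (assumed layout, not part of the commit): buffer
+ * sizes are implicit in the address-ordered list. If one buffer's data
+ * starts at offset 0x100 and the next buffer's data starts at 0x1c0,
+ * binder_alloc_buffer_size() reports 0xc0 for the first; the last
+ * buffer runs to the end of the mmap'd region.
+ */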
+
+static void binder_insert_free_buffer(struct binder_alloc *alloc,
+				      struct binder_buffer *new_buffer)
+{
+	struct rb_node **p = &alloc->free_buffers.rb_node;
+	struct rb_node *parent = NULL;
+	struct binder_buffer *buffer;
+	size_t buffer_size;
+	size_t new_buffer_size;
+
+	BUG_ON(!new_buffer->free);
+
+	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
+
+	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+		     "%d: add free buffer, size %zd, at %pK\n",
+		      alloc->pid, new_buffer_size, new_buffer);
+
+	while (*p) {
+		parent = *p;
+		buffer = rb_entry(parent, struct binder_buffer, rb_node);
+		BUG_ON(!buffer->free);
+
+		buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+		if (new_buffer_size < buffer_size)
+			p = &parent->rb_left;
+		else
+			p = &parent->rb_right;
+	}
+	rb_link_node(&new_buffer->rb_node, parent, p);
+	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer_locked(
+		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
+{
+	struct rb_node **p = &alloc->allocated_buffers.rb_node;
+	struct rb_node *parent = NULL;
+	struct binder_buffer *buffer;
+
+	BUG_ON(new_buffer->free);
+
+	while (*p) {
+		parent = *p;
+		buffer = rb_entry(parent, struct binder_buffer, rb_node);
+		BUG_ON(buffer->free);
+
+		if (new_buffer->data < buffer->data)
+			p = &parent->rb_left;
+		else if (new_buffer->data > buffer->data)
+			p = &parent->rb_right;
+		else
+			BUG();
+	}
+	rb_link_node(&new_buffer->rb_node, parent, p);
+	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_alloc_prepare_to_free_locked(
+		struct binder_alloc *alloc,
+		uintptr_t user_ptr)
+{
+	struct rb_node *n = alloc->allocated_buffers.rb_node;
+	struct binder_buffer *buffer;
+	void *kern_ptr;
+
+	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
+
+	while (n) {
+		buffer = rb_entry(n, struct binder_buffer, rb_node);
+		BUG_ON(buffer->free);
+
+		if (kern_ptr < buffer->data)
+			n = n->rb_left;
+		else if (kern_ptr > buffer->data)
+			n = n->rb_right;
+		else {
+			/*
+			 * Guard against user threads attempting to
+			 * free the buffer twice
+			 */
+			if (buffer->free_in_progress) {
+				pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
+				       alloc->pid, current->pid, (u64)user_ptr);
+				return NULL;
+			}
+			buffer->free_in_progress = 1;
+			return buffer;
+		}
+	}
+	return NULL;
+}
+
+/**
+ * binder_alloc_prepare_to_free() - get buffer given user ptr
+ * @alloc:	binder_alloc for this proc
+ * @user_ptr:	User pointer to buffer data
+ *
+ * Validate userspace pointer to buffer data and return buffer corresponding to
+ * that user pointer. Search the rb tree for buffer that matches user data
+ * pointer.
+ *
+ * Return:	Pointer to buffer or NULL
+ */
+struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+						   uintptr_t user_ptr)
+{
+	struct binder_buffer *buffer;
+
+	mutex_lock(&alloc->mutex);
+	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
+	mutex_unlock(&alloc->mutex);
+	return buffer;
+}
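+/*
+ * Usage sketch (illustrative, based on the BC_FREE_BUFFER path in
+ * binder.c): the caller first translates the user pointer, then frees
+ * the buffer once references into it have been released:
+ *
+ *	buffer = binder_alloc_prepare_to_free(&proc->alloc, data_ptr);
+ *	if (buffer == NULL)
+ *		break;	// bad pointer or attempted double free
+ *	...release objects referenced by the buffer...
+ *	binder_alloc_free_buf(&proc->alloc, buffer);
+ */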
+
+static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+				    void *start, void *end,
+				    struct vm_area_struct *vma)
+{
+	void *page_addr;
+	unsigned long user_page_addr;
+	struct binder_lru_page *page;
+	struct mm_struct *mm = NULL;
+	bool need_mm = false;
+
+	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+		     "%d: %s pages %pK-%pK\n", alloc->pid,
+		     allocate ? "allocate" : "free", start, end);
+
+	if (end <= start)
+		return 0;
+
+	trace_binder_update_page_range(alloc, allocate, start, end);
+
+	if (allocate == 0)
+		goto free_range;
+
+	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+		if (!page->page_ptr) {
+			need_mm = true;
+			break;
+		}
+	}
+
+	if (!vma && need_mm)
+		mm = get_task_mm(alloc->tsk);
+
+	if (mm) {
+		down_write(&mm->mmap_sem);
+		vma = alloc->vma;
+		if (vma && mm != alloc->vma_vm_mm) {
+			pr_err("%d: vma mm and task mm mismatch\n",
+				alloc->pid);
+			vma = NULL;
+		}
+	}
+
+	if (!vma && need_mm) {
+		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
+			alloc->pid);
+		goto err_no_vma;
+	}
+
+	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+		int ret;
+		bool on_lru;
+		size_t index;
+
+		index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		page = &alloc->pages[index];
+
+		if (page->page_ptr) {
+			trace_binder_alloc_lru_start(alloc, index);
+
+			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+			WARN_ON(!on_lru);
+
+			trace_binder_alloc_lru_end(alloc, index);
+			continue;
+		}
+
+		if (WARN_ON(!vma))
+			goto err_page_ptr_cleared;
+
+		trace_binder_alloc_page_start(alloc, index);
+		page->page_ptr = alloc_page(GFP_KERNEL |
+					    __GFP_HIGHMEM |
+					    __GFP_ZERO);
+		if (!page->page_ptr) {
+			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
+				alloc->pid, page_addr);
+			goto err_alloc_page_failed;
+		}
+		page->alloc = alloc;
+		INIT_LIST_HEAD(&page->lru);
+
+		ret = map_kernel_range_noflush((unsigned long)page_addr,
+					       PAGE_SIZE, PAGE_KERNEL,
+					       &page->page_ptr);
+		flush_cache_vmap((unsigned long)page_addr,
+				(unsigned long)page_addr + PAGE_SIZE);
+		if (ret != 1) {
+			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
+			       alloc->pid, page_addr);
+			goto err_map_kernel_failed;
+		}
+		user_page_addr =
+			(uintptr_t)page_addr + alloc->user_buffer_offset;
+		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
+		if (ret) {
+			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
+			       alloc->pid, user_page_addr);
+			goto err_vm_insert_page_failed;
+		}
+
+		trace_binder_alloc_page_end(alloc, index);
+		/* vm_insert_page does not seem to increment the refcount */
+	}
+	if (mm) {
+		up_write(&mm->mmap_sem);
+		mmput(mm);
+	}
+	return 0;
+
+free_range:
+	for (page_addr = end - PAGE_SIZE; page_addr >= start;
+	     page_addr -= PAGE_SIZE) {
+		bool ret;
+		size_t index;
+
+		index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		page = &alloc->pages[index];
+
+		trace_binder_free_lru_start(alloc, index);
+
+		ret = list_lru_add(&binder_alloc_lru, &page->lru);
+		WARN_ON(!ret);
+
+		trace_binder_free_lru_end(alloc, index);
+		continue;
+
+err_vm_insert_page_failed:
+		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+		__free_page(page->page_ptr);
+		page->page_ptr = NULL;
+err_alloc_page_failed:
+err_page_ptr_cleared:
+		;
+	}
+err_no_vma:
+	if (mm) {
+		up_write(&mm->mmap_sem);
+		mmput(mm);
+	}
+	return vma ? -ENOMEM : -ESRCH;
+}
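+/*
+ * Call sketch (illustrative): callers in this file pass allocate = 1 to
+ * populate the page range backing a new buffer, e.g.
+ *
+ *	binder_update_page_range(alloc, 1, start, end, NULL);
+ *
+ * and allocate = 0 to release the range again, which now parks the
+ * pages on binder_alloc_lru for the shrinker instead of freeing them
+ * outright.
+ */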
+
+struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
+						  size_t data_size,
+						  size_t offsets_size,
+						  size_t extra_buffers_size,
+						  int is_async)
+{
+	struct rb_node *n = alloc->free_buffers.rb_node;
+	struct binder_buffer *buffer;
+	size_t buffer_size;
+	struct rb_node *best_fit = NULL;
+	void *has_page_addr;
+	void *end_page_addr;
+	size_t size, data_offsets_size;
+	int ret;
+
+	if (alloc->vma == NULL) {
+		pr_err("%d: binder_alloc_buf, no vma\n",
+		       alloc->pid);
+		return ERR_PTR(-ESRCH);
+	}
+
+	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
+		ALIGN(offsets_size, sizeof(void *));
+
+	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
+		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+				"%d: got transaction with invalid size %zd-%zd\n",
+				alloc->pid, data_size, offsets_size);
+		return ERR_PTR(-EINVAL);
+	}
+	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
+	if (size < data_offsets_size || size < extra_buffers_size) {
+		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+				"%d: got transaction with invalid extra_buffers_size %zd\n",
+				alloc->pid, extra_buffers_size);
+		return ERR_PTR(-EINVAL);
+	}
+	if (is_async &&
+	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
+		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
+			      alloc->pid, size);
+		return ERR_PTR(-ENOSPC);
+	}
+
+	/* Pad 0-size buffers so they get assigned unique addresses */
+	size = max(size, sizeof(void *));
+
+	while (n) {
+		buffer = rb_entry(n, struct binder_buffer, rb_node);
+		BUG_ON(!buffer->free);
+		buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+		if (size < buffer_size) {
+			best_fit = n;
+			n = n->rb_left;
+		} else if (size > buffer_size)
+			n = n->rb_right;
+		else {
+			best_fit = n;
+			break;
+		}
+	}
+	if (best_fit == NULL) {
+		size_t allocated_buffers = 0;
+		size_t largest_alloc_size = 0;
+		size_t total_alloc_size = 0;
+		size_t free_buffers = 0;
+		size_t largest_free_size = 0;
+		size_t total_free_size = 0;
+
+		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
+		     n = rb_next(n)) {
+			buffer = rb_entry(n, struct binder_buffer, rb_node);
+			buffer_size = binder_alloc_buffer_size(alloc, buffer);
+			allocated_buffers++;
+			total_alloc_size += buffer_size;
+			if (buffer_size > largest_alloc_size)
+				largest_alloc_size = buffer_size;
+		}
+		for (n = rb_first(&alloc->free_buffers); n != NULL;
+		     n = rb_next(n)) {
+			buffer = rb_entry(n, struct binder_buffer, rb_node);
+			buffer_size = binder_alloc_buffer_size(alloc, buffer);
+			free_buffers++;
+			total_free_size += buffer_size;
+			if (buffer_size > largest_free_size)
+				largest_free_size = buffer_size;
+		}
+		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
+			alloc->pid, size);
+		pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
+		       total_alloc_size, allocated_buffers, largest_alloc_size,
+		       total_free_size, free_buffers, largest_free_size);
+		return ERR_PTR(-ENOSPC);
+	}
+	if (n == NULL) {
+		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+		buffer_size = binder_alloc_buffer_size(alloc, buffer);
+	}
+
+	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
+		      alloc->pid, size, buffer, buffer_size);
+
+	has_page_addr =
+		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+	WARN_ON(n && buffer_size != size);
+	end_page_addr =
+		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+	if (end_page_addr > has_page_addr)
+		end_page_addr = has_page_addr;
+	ret = binder_update_page_range(alloc, 1,
+	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (buffer_size != size) {
+		struct binder_buffer *new_buffer;
+
+		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+		if (!new_buffer) {
+			pr_err("%s: %d failed to alloc new buffer struct\n",
+			       __func__, alloc->pid);
+			goto err_alloc_buf_struct_failed;
+		}
+		new_buffer->data = (u8 *)buffer->data + size;
+		list_add(&new_buffer->entry, &buffer->entry);
+		new_buffer->free = 1;
+		binder_insert_free_buffer(alloc, new_buffer);
+	}
+
+	rb_erase(best_fit, &alloc->free_buffers);
+	buffer->free = 0;
+	buffer->free_in_progress = 0;
+	binder_insert_allocated_buffer_locked(alloc, buffer);
+	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+		     "%d: binder_alloc_buf size %zd got %pK\n",
+		      alloc->pid, size, buffer);
+	buffer->data_size = data_size;
+	buffer->offsets_size = offsets_size;
+	buffer->async_transaction = is_async;
+	buffer->extra_buffers_size = extra_buffers_size;
+	if (is_async) {
+		alloc->free_async_space -= size + sizeof(struct binder_buffer);
+		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+			     "%d: binder_alloc_buf size %zd async free %zd\n",
+			      alloc->pid, size, alloc->free_async_space);
+	}
+	return buffer;
+
+err_alloc_buf_struct_failed:
+	binder_update_page_range(alloc, 0,
+				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+				 end_page_addr, NULL);
+	return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * binder_alloc_new_buf() - Allocate a new binder buffer
+ * @alloc:              binder_alloc for this proc
+ * @data_size:          size of user data buffer
+ * @offsets_size:       user specified buffer offset
+ * @extra_buffers_size: size of extra space for meta-data (eg, security context)
+ * @is_async:           buffer for async transaction
+ *
+ * Allocate a new buffer given the requested sizes. Returns
+ * the kernel version of the buffer pointer. The size allocated
+ * is the sum of the three given sizes (each rounded up to
+ * pointer-sized boundary)
+ *
+ * Return:	The allocated buffer or %ERR_PTR(-errno) if there is an error
+ */
+struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+					   size_t data_size,
+					   size_t offsets_size,
+					   size_t extra_buffers_size,
+					   int is_async)
+{
+	struct binder_buffer *buffer;
+
+	mutex_lock(&alloc->mutex);
+	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
+					     extra_buffers_size, is_async);
+	mutex_unlock(&alloc->mutex);
+	return buffer;
+}
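+/*
+ * Usage sketch (illustrative, mirroring binder_transaction() in
+ * binder.c): the return value is an ERR_PTR on failure, so callers
+ * must test it with IS_ERR() rather than against NULL:
+ *
+ *	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
+ *					 tr->offsets_size, extra_buffers_size,
+ *					 !reply && (t->flags & TF_ONE_WAY));
+ *	if (IS_ERR(t->buffer)) {
+ *		t->buffer = NULL;
+ *		goto err_binder_alloc_buf_failed;
+ *	}
+ */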
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
+}
+
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
+{
+	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
+}
+
+static void binder_delete_free_buffer(struct binder_alloc *alloc,
+				      struct binder_buffer *buffer)
+{
+	struct binder_buffer *prev, *next = NULL;
+	bool to_free = true;
+
+	BUG_ON(alloc->buffers.next == &buffer->entry);
+	prev = binder_buffer_prev(buffer);
+	BUG_ON(!prev->free);
+	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+		to_free = false;
+		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+				   "%d: merge free, buffer %pK share page with %pK\n",
+				   alloc->pid, buffer->data, prev->data);
+	}
+
+	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+		next = binder_buffer_next(buffer);
+		if (buffer_start_page(next) == buffer_start_page(buffer)) {
+			to_free = false;
+			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+					   "%d: merge free, buffer %pK share page with %pK\n",
+					   alloc->pid,
+					   buffer->data,
+					   next->data);
+		}
+	}
+
+	if (PAGE_ALIGNED(buffer->data)) {
+		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+				   "%d: merge free, buffer start %pK is page aligned\n",
+				   alloc->pid, buffer->data);
+		to_free = false;
+	}
+
+	if (to_free) {
+		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+				   alloc->pid, buffer->data,
+				   prev->data, next->data);
+		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+					 buffer_start_page(buffer) + PAGE_SIZE,
+					 NULL);
+	}
+	list_del(&buffer->entry);
+	kfree(buffer);
+}
+
+static void binder_free_buf_locked(struct binder_alloc *alloc,
+				   struct binder_buffer *buffer)
+{
+	size_t size, buffer_size;
+
+	buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+	size = ALIGN(buffer->data_size, sizeof(void *)) +
+		ALIGN(buffer->offsets_size, sizeof(void *)) +
+		ALIGN(buffer->extra_buffers_size, sizeof(void *));
+
+	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
+		      alloc->pid, buffer, size, buffer_size);
+
+	BUG_ON(buffer->free);
+	BUG_ON(size > buffer_size);
+	BUG_ON(buffer->transaction != NULL);
+	BUG_ON(buffer->data < alloc->buffer);
+	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
+
+	if (buffer->async_transaction) {
+		alloc->free_async_space += size + sizeof(struct binder_buffer);
+
+		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+			     "%d: binder_free_buf size %zd async free %zd\n",
+			      alloc->pid, size, alloc->free_async_space);
+	}
+
+	binder_update_page_range(alloc, 0,
+		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
+		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+		NULL);
+
+	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
+	buffer->free = 1;
+	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+		struct binder_buffer *next = binder_buffer_next(buffer);
+
+		if (next->free) {
+			rb_erase(&next->rb_node, &alloc->free_buffers);
+			binder_delete_free_buffer(alloc, next);
+		}
+	}
+	if (alloc->buffers.next != &buffer->entry) {
+		struct binder_buffer *prev = binder_buffer_prev(buffer);
+
+		if (prev->free) {
+			binder_delete_free_buffer(alloc, buffer);
+			rb_erase(&prev->rb_node, &alloc->free_buffers);
+			buffer = prev;
+		}
+	}
+	binder_insert_free_buffer(alloc, buffer);
+}
+
+/**
+ * binder_alloc_free_buf() - free a binder buffer
+ * @alloc:	binder_alloc for this proc
+ * @buffer:	kernel pointer to buffer
+ *
+ * Free the buffer allocated via binder_alloc_new_buffer()
+ */
+void binder_alloc_free_buf(struct binder_alloc *alloc,
+			    struct binder_buffer *buffer)
+{
+	mutex_lock(&alloc->mutex);
+	binder_free_buf_locked(alloc, buffer);
+	mutex_unlock(&alloc->mutex);
+}
+
+/**
+ * binder_alloc_mmap_handler() - map virtual address space for proc
+ * @alloc:	alloc structure for this proc
+ * @vma:	vma passed to mmap()
+ *
+ * Called by binder_mmap() to initialize the space specified in
+ * vma for allocating binder buffers
+ *
+ * Return:
+ *      0 = success
+ *      -EBUSY = address space already mapped
+ *      -ENOMEM = failed to map memory to given address space
+ */
+int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+			      struct vm_area_struct *vma)
+{
+	int ret;
+	struct vm_struct *area;
+	const char *failure_string;
+	struct binder_buffer *buffer;
+
+	mutex_lock(&binder_alloc_mmap_lock);
+	if (alloc->buffer) {
+		ret = -EBUSY;
+		failure_string = "already mapped";
+		goto err_already_mapped;
+	}
+
+	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+	if (area == NULL) {
+		ret = -ENOMEM;
+		failure_string = "get_vm_area";
+		goto err_get_vm_area_failed;
+	}
+	alloc->buffer = area->addr;
+	alloc->user_buffer_offset =
+		vma->vm_start - (uintptr_t)alloc->buffer;
+	mutex_unlock(&binder_alloc_mmap_lock);
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+	if (cache_is_vipt_aliasing()) {
+		while (CACHE_COLOUR(
+				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
+			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
+				__func__, alloc->pid, vma->vm_start,
+				vma->vm_end, alloc->buffer);
+			vma->vm_start += PAGE_SIZE;
+		}
+	}
+#endif
+	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
+				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
+			       GFP_KERNEL);
+	if (alloc->pages == NULL) {
+		ret = -ENOMEM;
+		failure_string = "alloc page array";
+		goto err_alloc_pages_failed;
+	}
+	alloc->buffer_size = vma->vm_end - vma->vm_start;
+
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer) {
+		ret = -ENOMEM;
+		failure_string = "alloc buffer struct";
+		goto err_alloc_buf_struct_failed;
+	}
+
+	buffer->data = alloc->buffer;
+	list_add(&buffer->entry, &alloc->buffers);
+	buffer->free = 1;
+	binder_insert_free_buffer(alloc, buffer);
+	alloc->free_async_space = alloc->buffer_size / 2;
+	barrier();
+	alloc->vma = vma;
+	alloc->vma_vm_mm = vma->vm_mm;
+
+	return 0;
+
+err_alloc_buf_struct_failed:
+	kfree(alloc->pages);
+	alloc->pages = NULL;
+err_alloc_pages_failed:
+	mutex_lock(&binder_alloc_mmap_lock);
+	vfree(alloc->buffer);
+	alloc->buffer = NULL;
+err_get_vm_area_failed:
+err_already_mapped:
+	mutex_unlock(&binder_alloc_mmap_lock);
+	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
+	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+	return ret;
+}
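+/*
+ * Usage sketch (illustrative): binder_mmap() in binder.c validates the
+ * vma size and protections, then hands the range to the allocator:
+ *
+ *	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
+ *	if (ret)
+ *		return ret;
+ */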
+
+
+void binder_alloc_deferred_release(struct binder_alloc *alloc)
+{
+	struct rb_node *n;
+	int buffers, page_count;
+	struct binder_buffer *buffer;
+
+	BUG_ON(alloc->vma);
+
+	buffers = 0;
+	mutex_lock(&alloc->mutex);
+	while ((n = rb_first(&alloc->allocated_buffers))) {
+		buffer = rb_entry(n, struct binder_buffer, rb_node);
+
+		/* Transaction should already have been freed */
+		BUG_ON(buffer->transaction);
+
+		binder_free_buf_locked(alloc, buffer);
+		buffers++;
+	}
+
+	while (!list_empty(&alloc->buffers)) {
+		buffer = list_first_entry(&alloc->buffers,
+					  struct binder_buffer, entry);
+		WARN_ON(!buffer->free);
+
+		list_del(&buffer->entry);
+		WARN_ON_ONCE(!list_empty(&alloc->buffers));
+		kfree(buffer);
+	}
+
+	page_count = 0;
+	if (alloc->pages) {
+		int i;
+
+		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+			void *page_addr;
+			bool on_lru;
+
+			if (!alloc->pages[i].page_ptr)
+				continue;
+
+			on_lru = list_lru_del(&binder_alloc_lru,
+					      &alloc->pages[i].lru);
+			page_addr = alloc->buffer + i * PAGE_SIZE;
+			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+				     "%s: %d: page %d at %pK %s\n",
+				     __func__, alloc->pid, i, page_addr,
+				     on_lru ? "on lru" : "active");
+			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+			__free_page(alloc->pages[i].page_ptr);
+			page_count++;
+		}
+		kfree(alloc->pages);
+		vfree(alloc->buffer);
+	}
+	mutex_unlock(&alloc->mutex);
+
+	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
+		     "%s: %d buffers %d, pages %d\n",
+		     __func__, alloc->pid, buffers, page_count);
+}
+
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+				struct binder_buffer *buffer)
+{
+	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
+		   prefix, buffer->debug_id, buffer->data,
+		   buffer->data_size, buffer->offsets_size,
+		   buffer->extra_buffers_size,
+		   buffer->transaction ? "active" : "delivered");
+}
+
+/**
+ * binder_alloc_print_allocated() - print buffer info
+ * @m:     seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ *
+ * Prints information about every buffer associated with
+ * the binder_alloc state to the given seq_file
+ */
+void binder_alloc_print_allocated(struct seq_file *m,
+				  struct binder_alloc *alloc)
+{
+	struct rb_node *n;
+
+	mutex_lock(&alloc->mutex);
+	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+		print_binder_buffer(m, "  buffer",
+				    rb_entry(n, struct binder_buffer, rb_node));
+	mutex_unlock(&alloc->mutex);
+}
+
+/**
+ * binder_alloc_print_pages() - print page usage
+ * @m:     seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ */
+void binder_alloc_print_pages(struct seq_file *m,
+			      struct binder_alloc *alloc)
+{
+	struct binder_lru_page *page;
+	int i;
+	int active = 0;
+	int lru = 0;
+	int free = 0;
+
+	mutex_lock(&alloc->mutex);
+	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+		page = &alloc->pages[i];
+		if (!page->page_ptr)
+			free++;
+		else if (list_empty(&page->lru))
+			active++;
+		else
+			lru++;
+	}
+	mutex_unlock(&alloc->mutex);
+	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
+}
+
+/**
+ * binder_alloc_get_allocated_count() - return count of buffers
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: count of allocated buffers
+ */
+int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
+{
+	struct rb_node *n;
+	int count = 0;
+
+	mutex_lock(&alloc->mutex);
+	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+		count++;
+	mutex_unlock(&alloc->mutex);
+	return count;
+}
+
+
+/**
+ * binder_alloc_vma_close() - invalidate address space
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_vma_close() when releasing address space.
+ * Clears alloc->vma to prevent new incoming transactions from
+ * allocating more buffers.
+ */
+void binder_alloc_vma_close(struct binder_alloc *alloc)
+{
+	WRITE_ONCE(alloc->vma, NULL);
+	WRITE_ONCE(alloc->vma_vm_mm, NULL);
+}
+
+/**
+ * binder_alloc_free_page() - shrinker callback to free pages
+ * @item:   item to free
+ * @lock:   lock protecting the item
+ * @cb_arg: callback argument
+ *
+ * Called from list_lru_walk() in binder_shrink_scan() to free
+ * up pages when the system is under memory pressure.
+ */
+enum lru_status binder_alloc_free_page(struct list_head *item,
+				       struct list_lru_one *lru,
+				       spinlock_t *lock,
+				       void *cb_arg)
+{
+	struct mm_struct *mm = NULL;
+	struct binder_lru_page *page = container_of(item,
+						    struct binder_lru_page,
+						    lru);
+	struct binder_alloc *alloc;
+	uintptr_t page_addr;
+	size_t index;
+
+	alloc = page->alloc;
+	if (!mutex_trylock(&alloc->mutex))
+		goto err_get_alloc_mutex_failed;
+
+	if (!page->page_ptr)
+		goto err_page_already_freed;
+
+	index = page - alloc->pages;
+	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+	if (alloc->vma) {
+		mm = get_task_mm(alloc->tsk);
+		if (!mm)
+			goto err_get_task_mm_failed;
+		if (!down_write_trylock(&mm->mmap_sem))
+			goto err_down_write_mmap_sem_failed;
+
+		trace_binder_unmap_user_start(alloc, index);
+
+		zap_page_range(alloc->vma,
+			       page_addr + alloc->user_buffer_offset,
+			       PAGE_SIZE);
+
+		trace_binder_unmap_user_end(alloc, index);
+
+		up_write(&mm->mmap_sem);
+		mmput(mm);
+	}
+
+	trace_binder_unmap_kernel_start(alloc, index);
+
+	unmap_kernel_range(page_addr, PAGE_SIZE);
+	__free_page(page->page_ptr);
+	page->page_ptr = NULL;
+
+	trace_binder_unmap_kernel_end(alloc, index);
+
+	list_lru_isolate(lru, item);
+
+	mutex_unlock(&alloc->mutex);
+	return LRU_REMOVED;
+
+err_down_write_mmap_sem_failed:
+	mmput(mm);
+err_get_task_mm_failed:
+err_page_already_freed:
+	mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+	return LRU_SKIP;
+}
+
+static unsigned long
+binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned long ret = list_lru_count(&binder_alloc_lru);
+	return ret;
+}
+
+static unsigned long
+binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned long ret;
+
+	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+			    NULL, sc->nr_to_scan);
+	return ret;
+}
+
+struct shrinker binder_shrinker = {
+	.count_objects = binder_shrink_count,
+	.scan_objects = binder_shrink_scan,
+	.seeks = DEFAULT_SEEKS,
+};
+
+/**
+ * binder_alloc_init() - called by binder_open() for per-proc initialization
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_open() to initialize binder_alloc fields for
+ * new binder proc
+ */
+void binder_alloc_init(struct binder_alloc *alloc)
+{
+	alloc->tsk = current->group_leader;
+	alloc->pid = current->group_leader->pid;
+	mutex_init(&alloc->mutex);
+	INIT_LIST_HEAD(&alloc->buffers);
+}
+
+void binder_alloc_shrinker_init(void)
+{
+	list_lru_init(&binder_alloc_lru);
+	register_shrinker(&binder_shrinker);
+}
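+
+/*
+ * Note (illustrative): binder_init() calls binder_alloc_shrinker_init()
+ * once at module load (see the binder.c hunk above). From then on the
+ * MM core invokes binder_shrink_count()/binder_shrink_scan() under
+ * memory pressure, reclaiming freed-but-cached buffer pages via
+ * binder_alloc_free_page().
+ */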

+ 187 - 0
drivers/android/binder_alloc.h

@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_ALLOC_H
+#define _LINUX_BINDER_ALLOC_H
+
+#include <linux/rbtree.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/list_lru.h>
+
+extern struct list_lru binder_alloc_lru;
+struct binder_transaction;
+
+/**
+ * struct binder_buffer - buffer used for binder transactions
+ * @entry:              entry in the alloc->buffers list
+ * @rb_node:            node for allocated_buffers/free_buffers rb trees
+ * @free:               true if buffer is free
+ * @allow_user_free:    %true if the buffer may be freed from user space
+ * @async_transaction:  %true if the buffer is in use for an async transaction
+ * @free_in_progress:   %true if a free of this buffer is in progress
+ * @debug_id:           unique ID for debugging
+ * @transaction:        pointer to associated struct binder_transaction
+ * @target_node:        struct binder_node associated with this buffer
+ * @data_size:          size of @transaction data
+ * @offsets_size:       size of the array of buffer offsets
+ * @extra_buffers_size: size of space for other objects (like sg lists)
+ * @data:               pointer to base of buffer space
+ *
+ * Bookkeeping structure for binder transaction buffers
+ */
+struct binder_buffer {
+	struct list_head entry; /* free and allocated entries by address */
+	struct rb_node rb_node; /* free entry by size or allocated entry */
+				/* by address */
+	unsigned free:1;
+	unsigned allow_user_free:1;
+	unsigned async_transaction:1;
+	unsigned free_in_progress:1;
+	unsigned debug_id:28;
+
+	struct binder_transaction *transaction;
+
+	struct binder_node *target_node;
+	size_t data_size;
+	size_t offsets_size;
+	size_t extra_buffers_size;
+	void *data;
+};
+
+/**
+ * struct binder_lru_page - page object used for binder shrinker
+ * @page_ptr: pointer to physical page in mmap'd space
+ * @lru:      entry in binder_alloc_lru
+ * @alloc:    binder_alloc for a proc
+ */
+struct binder_lru_page {
+	struct list_head lru;
+	struct page *page_ptr;
+	struct binder_alloc *alloc;
+};
+
+/**
+ * struct binder_alloc - per-binder proc state for binder allocator
+ * @vma:                vm_area_struct passed to mmap_handler
+ *                      (invariant after mmap)
+ * @tsk:                task_struct for the task that called init for
+ *                      this proc (invariant after init)
+ * @vma_vm_mm:          copy of vma->vm_mm (invariant after mmap)
+ * @buffer:             base of per-proc address space mapped via mmap
+ * @user_buffer_offset: offset between user and kernel VAs for buffer
+ * @buffers:            list of all buffers for this proc
+ * @free_buffers:       rb tree of buffers available for allocation
+ *                      sorted by size
+ * @allocated_buffers:  rb tree of allocated buffers sorted by address
+ * @free_async_space:   VA space available for async buffers. This is
+ *                      initialized at mmap time to 1/2 the full VA space
+ * @pages:              array of binder_lru_page
+ * @buffer_size:        size of address space specified via mmap
+ * @pid:                pid for associated binder_proc (invariant after init)
+ *
+ * Bookkeeping structure for per-proc address space management for binder
+ * buffers. It is normally initialized during binder_init() and binder_mmap()
+ * calls. The address space is used for both user-visible buffers and for
+ * struct binder_buffer objects used to track the user buffers
+ */
+struct binder_alloc {
+	struct mutex mutex;
+	struct task_struct *tsk;
+	struct vm_area_struct *vma;
+	struct mm_struct *vma_vm_mm;
+	void *buffer;
+	ptrdiff_t user_buffer_offset;
+	struct list_head buffers;
+	struct rb_root free_buffers;
+	struct rb_root allocated_buffers;
+	size_t free_async_space;
+	struct binder_lru_page *pages;
+	size_t buffer_size;
+	uint32_t buffer_free;
+	int pid;
+};
+
+#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
+void binder_selftest_alloc(struct binder_alloc *alloc);
+#else
+static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
+#endif
+enum lru_status binder_alloc_free_page(struct list_head *item,
+				       struct list_lru_one *lru,
+				       spinlock_t *lock, void *cb_arg);
+extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+						  size_t data_size,
+						  size_t offsets_size,
+						  size_t extra_buffers_size,
+						  int is_async);
+extern void binder_alloc_init(struct binder_alloc *alloc);
+void binder_alloc_shrinker_init(void);
+extern void binder_alloc_vma_close(struct binder_alloc *alloc);
+extern struct binder_buffer *
+binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+			     uintptr_t user_ptr);
+extern void binder_alloc_free_buf(struct binder_alloc *alloc,
+				  struct binder_buffer *buffer);
+extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+				     struct vm_area_struct *vma);
+extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
+extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
+extern void binder_alloc_print_allocated(struct seq_file *m,
+					 struct binder_alloc *alloc);
+void binder_alloc_print_pages(struct seq_file *m,
+			      struct binder_alloc *alloc);
+
+/**
+ * binder_alloc_get_free_async_space() - get free space available for async
+ * @alloc:	binder_alloc for this proc
+ *
+ * Return:	the bytes remaining in the address-space for async transactions
+ */
+static inline size_t
+binder_alloc_get_free_async_space(struct binder_alloc *alloc)
+{
+	size_t free_async_space;
+
+	mutex_lock(&alloc->mutex);
+	free_async_space = alloc->free_async_space;
+	mutex_unlock(&alloc->mutex);
+	return free_async_space;
+}
+
+/**
+ * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs
+ * @alloc:	binder_alloc for this proc
+ *
+ * Return:	the offset between kernel and user-space addresses to use for
+ * virtual address conversion
+ */
+static inline ptrdiff_t
+binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
+{
+	/*
+	 * user_buffer_offset is constant if vma is set and
+	 * undefined if vma is not set. It is possible to
+	 * get here with !alloc->vma if the target process
+	 * is dying while a transaction is being initiated.
+	 * Returning the old value is ok in this case and
+	 * the transaction will fail.
+	 */
+	return alloc->user_buffer_offset;
+}
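+
+/*
+ * Conversion sketch (illustrative): binder.c uses this offset to derive
+ * the user-space address that corresponds to a kernel buffer, e.g. in
+ * binder_thread_read():
+ *
+ *	tr.data.ptr.buffer = (binder_uintptr_t)
+ *		((uintptr_t)t->buffer->data +
+ *		binder_alloc_get_user_buffer_offset(&proc->alloc));
+ */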
+
+#endif /* _LINUX_BINDER_ALLOC_H */
+

+ 310 - 0
drivers/android/binder_alloc_selftest.c

@@ -0,0 +1,310 @@
+/* binder_alloc_selftest.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm_types.h>
+#include <linux/err.h>
+#include "binder_alloc.h"
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+static bool binder_selftest_run = true;
+static int binder_selftest_failures;
+static DEFINE_MUTEX(binder_selftest_lock);
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer
+ * end with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer we
+ * are aligning. buf1 refers to previous buffer by addr.
+ * Symbol [ means the start of a buffer, ] means the end
+ * of a buffer, and | means page boundaries.
+ */
+enum buf_end_align_type {
+	/**
+	 * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+	 * the same page as the end of the previous buffer and
+	 * is not page aligned. Examples:
+	 * buf1 ][ buf2 ][ ...
+	 * buf1 ]|[ buf2 ][ ...
+	 */
+	SAME_PAGE_UNALIGNED = 0,
+	/**
+	 * @SAME_PAGE_ALIGNED: When the end of the previous buffer
+	 * is not page aligned, the end of this buffer is on the
+	 * same page as the end of the previous buffer and is page
+	 * aligned. When the previous buffer is page aligned, the
+	 * end of this buffer is aligned to the next page boundary.
+	 * Examples:
+	 * buf1 ][ buf2 ]| ...
+	 * buf1 ]|[ buf2 ]| ...
+	 */
+	SAME_PAGE_ALIGNED,
+	/**
+	 * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+	 * the page next to the end of the previous buffer and
+	 * is not page aligned. Examples:
+	 * buf1 ][ buf2 | buf2 ][ ...
+	 * buf1 ]|[ buf2 | buf2 ][ ...
+	 */
+	NEXT_PAGE_UNALIGNED,
+	/**
+	 * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+	 * the page next to the end of the previous buffer and
+	 * is page aligned. Examples:
+	 * buf1 ][ buf2 | buf2 ]| ...
+	 * buf1 ]|[ buf2 | buf2 ]| ...
+	 */
+	NEXT_PAGE_ALIGNED,
+	/**
+	 * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+	 * the page that follows the page after the end of the
+	 * previous buffer and is not page aligned. Examples:
+	 * buf1 ][ buf2 | buf2 | buf2 ][ ...
+	 * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+	 */
+	NEXT_NEXT_UNALIGNED,
+	LOOP_END,
+};
+
+static void pr_err_size_seq(size_t *sizes, int *seq)
+{
+	int i;
+
+	pr_err("alloc sizes: ");
+	for (i = 0; i < BUFFER_NUM; i++)
+		pr_cont("[%zu]", sizes[i]);
+	pr_cont("\n");
+	pr_err("free seq: ");
+	for (i = 0; i < BUFFER_NUM; i++)
+		pr_cont("[%d]", seq[i]);
+	pr_cont("\n");
+}
+
+static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
+					 struct binder_buffer *buffer,
+					 size_t size)
+{
+	void *page_addr, *end;
+	int page_index;
+
+	end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+	page_addr = buffer->data;
+	for (; page_addr < end; page_addr += PAGE_SIZE) {
+		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		if (!alloc->pages[page_index].page_ptr ||
+		    !list_empty(&alloc->pages[page_index].lru)) {
+			pr_err("expect alloc but is %s at page index %d\n",
+			       alloc->pages[page_index].page_ptr ?
+			       "lru" : "free", page_index);
+			return false;
+		}
+	}
+	return true;
+}
+
+static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
+				      struct binder_buffer *buffers[],
+				      size_t *sizes, int *seq)
+{
+	int i;
+
+	for (i = 0; i < BUFFER_NUM; i++) {
+		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+		if (IS_ERR(buffers[i]) ||
+		    !check_buffer_pages_allocated(alloc, buffers[i],
+						  sizes[i])) {
+			pr_err_size_seq(sizes, seq);
+			binder_selftest_failures++;
+		}
+	}
+}
+
+static void binder_selftest_free_buf(struct binder_alloc *alloc,
+				     struct binder_buffer *buffers[],
+				     size_t *sizes, int *seq, size_t end)
+{
+	int i;
+
+	for (i = 0; i < BUFFER_NUM; i++)
+		binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+	for (i = 0; i < end / PAGE_SIZE; i++) {
+		/**
+		 * Error message on a free page can be false positive
+		 * if binder shrinker ran during binder_alloc_free_buf
+		 * calls above.
+		 */
+		if (list_empty(&alloc->pages[i].lru)) {
+			pr_err_size_seq(sizes, seq);
+			pr_err("expect lru but is %s at page index %d\n",
+			       alloc->pages[i].page_ptr ? "alloc" : "free", i);
+			binder_selftest_failures++;
+		}
+	}
+}
+
+static void binder_selftest_free_page(struct binder_alloc *alloc)
+{
+	int i;
+	unsigned long count;
+
+	while ((count = list_lru_count(&binder_alloc_lru))) {
+		list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+			      NULL, count);
+	}
+
+	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+		if (alloc->pages[i].page_ptr) {
+			pr_err("expect free but is %s at page index %d\n",
+			       list_empty(&alloc->pages[i].lru) ?
+			       "alloc" : "lru", i);
+			binder_selftest_failures++;
+		}
+	}
+}
+
+static void binder_selftest_alloc_free(struct binder_alloc *alloc,
+				       size_t *sizes, int *seq, size_t end)
+{
+	struct binder_buffer *buffers[BUFFER_NUM];
+
+	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+
+	/* Allocate from lru. */
+	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+	if (list_lru_count(&binder_alloc_lru))
+		pr_err("lru list should be empty but is not\n");
+
+	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+	binder_selftest_free_page(alloc);
+}
+
+static bool is_dup(int *seq, int index, int val)
+{
+	int i;
+
+	for (i = 0; i < index; i++) {
+		if (seq[i] == val)
+			return true;
+	}
+	return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void binder_selftest_free_seq(struct binder_alloc *alloc,
+				     size_t *sizes, int *seq,
+				     int index, size_t end)
+{
+	int i;
+
+	if (index == BUFFER_NUM) {
+		binder_selftest_alloc_free(alloc, sizes, seq, end);
+		return;
+	}
+	for (i = 0; i < BUFFER_NUM; i++) {
+		if (is_dup(seq, index, i))
+			continue;
+		seq[index] = i;
+		binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
+	}
+}
+
+static void binder_selftest_alloc_size(struct binder_alloc *alloc,
+				       size_t *end_offset)
+{
+	int i;
+	int seq[BUFFER_NUM] = {0};
+	size_t front_sizes[BUFFER_NUM];
+	size_t back_sizes[BUFFER_NUM];
+	size_t last_offset, offset = 0;
+
+	for (i = 0; i < BUFFER_NUM; i++) {
+		last_offset = offset;
+		offset = end_offset[i];
+		front_sizes[i] = offset - last_offset;
+		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+	}
+	/*
+	 * Buffers share the first or last few pages.
+	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+	 * we need one giant buffer before getting to the last page.
+	 */
+	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+	binder_selftest_free_seq(alloc, front_sizes, seq, 0,
+				 end_offset[BUFFER_NUM - 1]);
+	binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
+}
+
+static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
+					 size_t *end_offset, int index)
+{
+	int align;
+	size_t end, prev;
+
+	if (index == BUFFER_NUM) {
+		binder_selftest_alloc_size(alloc, end_offset);
+		return;
+	}
+	prev = index == 0 ? 0 : end_offset[index - 1];
+	end = prev;
+
+	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+
+	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+		if (align % 2)
+			end = ALIGN(end, PAGE_SIZE);
+		else
+			end += BUFFER_MIN_SIZE;
+		end_offset[index] = end;
+		binder_selftest_alloc_offset(alloc, end_offset, index + 1);
+	}
+}
+
+/**
+ * binder_selftest_alloc() - Test alloc and free of buffer pages.
+ * @alloc: Pointer to alloc struct.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in all orders possible. Check that pages are
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page is called.
+ */
+void binder_selftest_alloc(struct binder_alloc *alloc)
+{
+	size_t end_offset[BUFFER_NUM];
+
+	if (!binder_selftest_run)
+		return;
+	mutex_lock(&binder_selftest_lock);
+	if (!binder_selftest_run || !alloc->vma)
+		goto done;
+	pr_info("STARTED\n");
+	binder_selftest_alloc_offset(alloc, end_offset, 0);
+	binder_selftest_run = false;
+	if (binder_selftest_failures > 0)
+		pr_info("%d tests FAILED\n", binder_selftest_failures);
+	else
+		pr_info("PASSED\n");
+
+done:
+	mutex_unlock(&binder_selftest_lock);
+}
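
For reference, the permutation walk above (is_dup() plus the recursive binder_selftest_free_seq()) can be exercised in isolation. A minimal user-space sketch, with a small N standing in for BUFFER_NUM (illustrative only, not part of the patch):

/* Minimal user-space sketch of the N! permutation walk used by
 * binder_selftest_free_seq() above; the recursion is identical in shape. */
#include <stdio.h>
#include <stdbool.h>

#define N 3	/* stands in for BUFFER_NUM */

static bool is_dup(const int *seq, int index, int val)
{
	int i;

	for (i = 0; i < index; i++)
		if (seq[i] == val)
			return true;
	return false;
}

static void walk(int *seq, int index)
{
	int i;

	if (index == N) {
		for (i = 0; i < N; i++)
			printf("%d ", seq[i]);	/* one complete free order */
		printf("\n");
		return;
	}
	for (i = 0; i < N; i++) {
		if (is_dup(seq, index, i))
			continue;	/* each buffer freed exactly once */
		seq[index] = i;
		walk(seq, index + 1);
	}
}

int main(void)
{
	int seq[N] = {0};

	walk(seq, 0);	/* prints 3! = 6 orders; the selftest runs BUFFER_NUM! */
	return 0;
}

Combined with the per-buffer end-alignment cases enumerated by binder_selftest_alloc_offset(), this covers every free order for every alignment combination.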

+ 77 - 19
drivers/android/binder_trace.h

@@ -23,7 +23,8 @@
 struct binder_buffer;
 struct binder_node;
 struct binder_proc;
-struct binder_ref;
+struct binder_alloc;
+struct binder_ref_data;
 struct binder_thread;
 struct binder_transaction;
 
@@ -146,8 +147,8 @@ TRACE_EVENT(binder_transaction_received,
 
 TRACE_EVENT(binder_transaction_node_to_ref,
 	TP_PROTO(struct binder_transaction *t, struct binder_node *node,
-		 struct binder_ref *ref),
-	TP_ARGS(t, node, ref),
+		 struct binder_ref_data *rdata),
+	TP_ARGS(t, node, rdata),
 
 	TP_STRUCT__entry(
 		__field(int, debug_id)
@@ -160,8 +161,8 @@ TRACE_EVENT(binder_transaction_node_to_ref,
 		__entry->debug_id = t->debug_id;
 		__entry->node_debug_id = node->debug_id;
 		__entry->node_ptr = node->ptr;
-		__entry->ref_debug_id = ref->debug_id;
-		__entry->ref_desc = ref->desc;
+		__entry->ref_debug_id = rdata->debug_id;
+		__entry->ref_desc = rdata->desc;
 	),
 	TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
 		  __entry->debug_id, __entry->node_debug_id,
@@ -170,8 +171,9 @@ TRACE_EVENT(binder_transaction_node_to_ref,
 );
 
 TRACE_EVENT(binder_transaction_ref_to_node,
-	TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
-	TP_ARGS(t, ref),
+	TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+		 struct binder_ref_data *rdata),
+	TP_ARGS(t, node, rdata),
 
 	TP_STRUCT__entry(
 		__field(int, debug_id)
@@ -182,10 +184,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
 	),
 	TP_fast_assign(
 		__entry->debug_id = t->debug_id;
-		__entry->ref_debug_id = ref->debug_id;
-		__entry->ref_desc = ref->desc;
-		__entry->node_debug_id = ref->node->debug_id;
-		__entry->node_ptr = ref->node->ptr;
+		__entry->ref_debug_id = rdata->debug_id;
+		__entry->ref_desc = rdata->desc;
+		__entry->node_debug_id = node->debug_id;
+		__entry->node_ptr = node->ptr;
 	),
 	TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
 		  __entry->debug_id, __entry->node_debug_id,
@@ -194,9 +196,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
 );
 
 TRACE_EVENT(binder_transaction_ref_to_ref,
-	TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
-		 struct binder_ref *dest_ref),
-	TP_ARGS(t, src_ref, dest_ref),
+	TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+		 struct binder_ref_data *src_ref,
+		 struct binder_ref_data *dest_ref),
+	TP_ARGS(t, node, src_ref, dest_ref),
 
 	TP_STRUCT__entry(
 		__field(int, debug_id)
@@ -208,7 +211,7 @@ TRACE_EVENT(binder_transaction_ref_to_ref,
 	),
 	TP_fast_assign(
 		__entry->debug_id = t->debug_id;
-		__entry->node_debug_id = src_ref->node->debug_id;
+		__entry->node_debug_id = node->debug_id;
 		__entry->src_ref_debug_id = src_ref->debug_id;
 		__entry->src_ref_desc = src_ref->desc;
 		__entry->dest_ref_debug_id = dest_ref->debug_id;
@@ -268,9 +271,9 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
 	TP_ARGS(buffer));
 
 TRACE_EVENT(binder_update_page_range,
-	TP_PROTO(struct binder_proc *proc, bool allocate,
+	TP_PROTO(struct binder_alloc *alloc, bool allocate,
 		 void *start, void *end),
-	TP_ARGS(proc, allocate, start, end),
+	TP_ARGS(alloc, allocate, start, end),
 	TP_STRUCT__entry(
 		__field(int, proc)
 		__field(bool, allocate)
@@ -278,9 +281,9 @@ TRACE_EVENT(binder_update_page_range,
 		__field(size_t, size)
 	),
 	TP_fast_assign(
-		__entry->proc = proc->pid;
+		__entry->proc = alloc->pid;
 		__entry->allocate = allocate;
-		__entry->offset = start - proc->buffer;
+		__entry->offset = start - alloc->buffer;
 		__entry->size = end - start;
 	),
 	TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
@@ -288,6 +291,61 @@ TRACE_EVENT(binder_update_page_range,
 		  __entry->offset, __entry->size)
 );
 
+DECLARE_EVENT_CLASS(binder_lru_page_class,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index),
+	TP_STRUCT__entry(
+		__field(int, proc)
+		__field(size_t, page_index)
+	),
+	TP_fast_assign(
+		__entry->proc = alloc->pid;
+		__entry->page_index = page_index;
+	),
+	TP_printk("proc=%d page_index=%zu",
+		  __entry->proc, __entry->page_index)
+);
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
 TRACE_EVENT(binder_command,
 	TP_PROTO(uint32_t cmd),
 	TP_ARGS(cmd),

+ 3 - 3
drivers/auxdisplay/panel.c

@@ -877,21 +877,21 @@ static void lcd_clear_fast_tilcd(struct charlcd *charlcd)
 	spin_unlock_irq(&pprt_lock);
 }
 
-static struct charlcd_ops charlcd_serial_ops = {
+static const struct charlcd_ops charlcd_serial_ops = {
 	.write_cmd	= lcd_write_cmd_s,
 	.write_data	= lcd_write_data_s,
 	.clear_fast	= lcd_clear_fast_s,
 	.backlight	= lcd_backlight,
 };
 
-static struct charlcd_ops charlcd_parallel_ops = {
+static const struct charlcd_ops charlcd_parallel_ops = {
 	.write_cmd	= lcd_write_cmd_p8,
 	.write_data	= lcd_write_data_p8,
 	.clear_fast	= lcd_clear_fast_p8,
 	.backlight	= lcd_backlight,
 };
 
-static struct charlcd_ops charlcd_tilcd_ops = {
+static const struct charlcd_ops charlcd_tilcd_ops = {
 	.write_cmd	= lcd_write_cmd_tilcd,
 	.write_data	= lcd_write_data_tilcd,
 	.clear_fast	= lcd_clear_fast_tilcd,
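
A brief note on what this constification (and the similar attribute_group, acpi_device_id, and pci_device_id changes below) buys: the table moves into read-only memory, so its function pointers cannot be overwritten after init. A minimal sketch (names from the hunk above; the stray write is illustrative):

static const struct charlcd_ops example_ops = {	/* placed in .rodata */
	.write_cmd	= lcd_write_cmd_s,
	.write_data	= lcd_write_data_s,
};

/* example_ops.write_cmd = other_fn;
 *	^ now rejected at compile time: assignment of member in
 *	  read-only object, instead of silently corrupting the table */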

+ 1 - 1
drivers/char/applicom.c

@@ -67,7 +67,7 @@ static char *applicom_pci_devnames[] = {
 	"PCI2000PFB"
 	"PCI2000PFB"
 };
 };
 
 
-static struct pci_device_id applicom_pci_tbl[] = {
+static const struct pci_device_id applicom_pci_tbl[] = {
 	{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCIGENERIC) },
 	{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCIGENERIC) },
 	{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN) },
 	{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN) },
 	{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000PFB) },
 	{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000PFB) },

+ 25 - 23
drivers/char/mwave/smapi.c

@@ -128,10 +128,11 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
 {
 	int bRC = -EIO;
 	unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
-	unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 };
-	unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
-	unsigned short numDspBases = 8;
-	unsigned short numUartBases = 4;
+	static const unsigned short ausDspBases[] = {
+		0x0030, 0x4E30, 0x8E30, 0xCE30,
+		0x0130, 0x0350, 0x0070, 0x0DB0 };
+	static const unsigned short ausUartBases[] = {
+		0x03F8, 0x02F8, 0x03E8, 0x02E8 };
 
 	PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg entry\n");
 
@@ -148,7 +149,7 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
 	pSettings->bDSPEnabled = ((usCX & 0x0001) != 0);
 	pSettings->usDspIRQ = usSI & 0x00FF;
 	pSettings->usDspDMA = (usSI & 0xFF00) >> 8;
-	if ((usDI & 0x00FF) < numDspBases) {
+	if ((usDI & 0x00FF) < ARRAY_SIZE(ausDspBases)) {
 		pSettings->usDspBaseIO = ausDspBases[usDI & 0x00FF];
 	} else {
 		pSettings->usDspBaseIO = 0;
@@ -176,7 +177,7 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
 
 	pSettings->bModemEnabled = ((usCX & 0x0001) != 0);
 	pSettings->usUartIRQ = usSI & 0x000F;
-	if (((usSI & 0xFF00) >> 8) < numUartBases) {
+	if (((usSI & 0xFF00) >> 8) < ARRAY_SIZE(ausUartBases)) {
 		pSettings->usUartBaseIO = ausUartBases[(usSI & 0xFF00) >> 8];
 	} else {
 		pSettings->usUartBaseIO = 0;
@@ -205,15 +206,16 @@ int smapi_set_DSP_cfg(void)
 	int bRC = -EIO;
 	int i;
 	unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
-	unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 };
-	unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
-	unsigned short ausDspIrqs[] = { 5, 7, 10, 11, 15 };
-	unsigned short ausUartIrqs[] = { 3, 4 };
-
-	unsigned short numDspBases = 8;
-	unsigned short numUartBases = 4;
-	unsigned short numDspIrqs = 5;
-	unsigned short numUartIrqs = 2;
+	static const unsigned short ausDspBases[] = {
+		0x0030, 0x4E30, 0x8E30, 0xCE30,
+		0x0130, 0x0350, 0x0070, 0x0DB0 };
+	static const unsigned short ausUartBases[] = {
+		0x03F8, 0x02F8, 0x03E8, 0x02E8 };
+	static const unsigned short ausDspIrqs[] = {
+		5, 7, 10, 11, 15 };
+	static const unsigned short ausUartIrqs[] = {
+		3, 4 };
+
 	unsigned short dspio_index = 0, uartio_index = 0;
 
 	PRINTK_5(TRACE_SMAPI,
@@ -221,11 +223,11 @@ int smapi_set_DSP_cfg(void)
 		mwave_3780i_irq, mwave_3780i_io, mwave_uart_irq, mwave_uart_io);
 
 	if (mwave_3780i_io) {
-		for (i = 0; i < numDspBases; i++) {
+		for (i = 0; i < ARRAY_SIZE(ausDspBases); i++) {
 			if (mwave_3780i_io == ausDspBases[i])
 				break;
 		}
-		if (i == numDspBases) {
+		if (i == ARRAY_SIZE(ausDspBases)) {
 			PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_io address %x. Aborting.\n", mwave_3780i_io);
 			return bRC;
 		}
@@ -233,22 +235,22 @@ int smapi_set_DSP_cfg(void)
 	}
 
 	if (mwave_3780i_irq) {
-		for (i = 0; i < numDspIrqs; i++) {
+		for (i = 0; i < ARRAY_SIZE(ausDspIrqs); i++) {
 			if (mwave_3780i_irq == ausDspIrqs[i])
 				break;
 		}
-		if (i == numDspIrqs) {
+		if (i == ARRAY_SIZE(ausDspIrqs)) {
 			PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_irq %x. Aborting.\n", mwave_3780i_irq);
 			return bRC;
 		}
 	}
 
 	if (mwave_uart_io) {
-		for (i = 0; i < numUartBases; i++) {
+		for (i = 0; i < ARRAY_SIZE(ausUartBases); i++) {
 			if (mwave_uart_io == ausUartBases[i])
 				break;
 		}
-		if (i == numUartBases) {
+		if (i == ARRAY_SIZE(ausUartBases)) {
 			PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_io address %x. Aborting.\n", mwave_uart_io);
 			return bRC;
 		}
@@ -257,11 +259,11 @@ int smapi_set_DSP_cfg(void)
 
 
 	if (mwave_uart_irq) {
-		for (i = 0; i < numUartIrqs; i++) {
+		for (i = 0; i < ARRAY_SIZE(ausUartIrqs); i++) {
 			if (mwave_uart_irq == ausUartIrqs[i])
 				break;
 		}
-		if (i == numUartIrqs) {
+		if (i == ARRAY_SIZE(ausUartIrqs)) {
 			PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_irq %x. Aborting.\n", mwave_uart_irq);
 			return bRC;
 		}
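
The conversion above replaces four hand-maintained element counts with ARRAY_SIZE(), so a loop bound can no longer drift from its table. The kernel macro is essentially the classic sizeof quotient; a simplified sketch (the real definition in include/linux/kernel.h additionally rejects plain pointers via a __must_be_array() check):

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const unsigned short ausUartIrqs[] = { 3, 4 };
/* ARRAY_SIZE(ausUartIrqs) == 2, computed by the compiler, so adding
 * or removing an entry updates every bounds check automatically. */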

+ 0 - 3
drivers/char/ppdev.c

@@ -101,9 +101,6 @@ static DEFINE_IDA(ida_index);
 #define PP_BUFFER_SIZE 1024
 #define PARDEVICE_MAX 8
 
-/* ROUND_UP macro from fs/select.c */
-#define ROUND_UP(x,y) (((x)+(y)-1)/(y))
-
 static DEFINE_MUTEX(pp_do_mutex);
 
 /* define fixed sized ioctl cmd for y2038 migration */
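
The removed ROUND_UP() copy was unused; its arithmetic survives in the kernel-wide DIV_ROUND_UP() helper, which any future caller can use instead. A simplified sketch of the include/linux/kernel.h definition:

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
/* e.g. DIV_ROUND_UP(1030, 1024) == 2: round a byte count up to whole buffers */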

+ 1 - 1
drivers/char/tlclk.c

@@ -766,7 +766,7 @@ static struct attribute *tlclk_sysfs_entries[] = {
 	NULL
 };
 
-static struct attribute_group tlclk_attribute_group = {
+static const struct attribute_group tlclk_attribute_group = {
 	.name = NULL,		/* put in device directory */
 	.attrs = tlclk_sysfs_entries,
 };

+ 1 - 1
drivers/char/virtio_console.c

@@ -1308,7 +1308,7 @@ static struct attribute *port_sysfs_entries[] = {
 	NULL
 };
 
-static struct attribute_group port_attribute_group = {
+static const struct attribute_group port_attribute_group = {
 	.name = NULL,		/* put in device directory */
 	.attrs = port_sysfs_entries,
 };

+ 21 - 18
drivers/char/xilinx_hwicap/xilinx_hwicap.c

@@ -86,8 +86,7 @@
 #include <linux/cdev.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
 #include <linux/uaccess.h>
 
 #ifdef CONFIG_OF
@@ -222,6 +221,8 @@ static const struct config_registers v6_config_registers = {
  * hwicap_command_desync - Send a DESYNC command to the ICAP port.
  * @drvdata: a pointer to the drvdata.
  *
+ * Returns: '0' on success and failure value on error
+ *
  * This command desynchronizes the ICAP After this command, a
  * bitstream containing a NULL packet, followed by a SYNCH packet is
  * required before the ICAP will recognize commands.
@@ -251,10 +252,12 @@ static int hwicap_command_desync(struct hwicap_drvdata *drvdata)
  * hwicap_get_configuration_register - Query a configuration register.
  * @drvdata: a pointer to the drvdata.
  * @reg: a constant which represents the configuration
- *		register value to be returned.
- * 		Examples:  XHI_IDCODE, XHI_FLR.
+ * register value to be returned.
+ * Examples: XHI_IDCODE, XHI_FLR.
  * @reg_data: returns the value of the register.
  *
+ * Returns: '0' on success and failure value on error
+ *
  * Sends a query packet to the ICAP and then receives the response.
  * The icap is left in Synched state.
  */
@@ -320,7 +323,8 @@ static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
 	dev_dbg(drvdata->dev, "initializing\n");
 	dev_dbg(drvdata->dev, "initializing\n");
 
 
 	/* Abort any current transaction, to make sure we have the
 	/* Abort any current transaction, to make sure we have the
-	 * ICAP in a good state. */
+	 * ICAP in a good state.
+	 */
 	dev_dbg(drvdata->dev, "Reset...\n");
 	dev_dbg(drvdata->dev, "Reset...\n");
 	drvdata->config->reset(drvdata);
 	drvdata->config->reset(drvdata);
 
 
@@ -632,7 +636,6 @@ static int hwicap_setup(struct device *dev, int id,
 
 	drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
 	if (!drvdata) {
-		dev_err(dev, "Couldn't allocate device private record\n");
 		retval = -ENOMEM;
 		goto failed0;
 	}
@@ -759,20 +762,20 @@ static int hwicap_of_probe(struct platform_device *op,
 	id = of_get_property(op->dev.of_node, "port-number", NULL);
 
 	/* It's most likely that we're using V4, if the family is not
-	   specified */
+	 * specified
+	 */
 	regs = &v4_config_registers;
 	family = of_get_property(op->dev.of_node, "xlnx,family", NULL);
 
 	if (family) {
-		if (!strcmp(family, "virtex2p")) {
+		if (!strcmp(family, "virtex2p"))
 			regs = &v2_config_registers;
-		} else if (!strcmp(family, "virtex4")) {
+		else if (!strcmp(family, "virtex4"))
 			regs = &v4_config_registers;
-		} else if (!strcmp(family, "virtex5")) {
+		else if (!strcmp(family, "virtex5"))
 			regs = &v5_config_registers;
-		} else if (!strcmp(family, "virtex6")) {
+		else if (!strcmp(family, "virtex6"))
 			regs = &v6_config_registers;
-		}
 	}
 	return hwicap_setup(&op->dev, id ? *id : -1, &res, config,
 			regs);
@@ -802,20 +805,20 @@ static int hwicap_drv_probe(struct platform_device *pdev)
 		return -ENODEV;
 
 	/* It's most likely that we're using V4, if the family is not
-	   specified */
+	 * specified
+	 */
 	regs = &v4_config_registers;
 	family = pdev->dev.platform_data;
 
 	if (family) {
-		if (!strcmp(family, "virtex2p")) {
+		if (!strcmp(family, "virtex2p"))
 			regs = &v2_config_registers;
-		} else if (!strcmp(family, "virtex4")) {
+		else if (!strcmp(family, "virtex4"))
 			regs = &v4_config_registers;
-		} else if (!strcmp(family, "virtex5")) {
+		else if (!strcmp(family, "virtex5"))
 			regs = &v5_config_registers;
-		} else if (!strcmp(family, "virtex6")) {
+		else if (!strcmp(family, "virtex6"))
 			regs = &v6_config_registers;
-		}
 	}
 
 	return hwicap_setup(&pdev->dev, pdev->id, res,

+ 9 - 4
drivers/char/xilinx_hwicap/xilinx_hwicap.h

@@ -62,11 +62,13 @@ struct hwicap_drvdata {
 
 struct hwicap_driver_config {
 	/* Read configuration data given by size into the data buffer.
-	   Return 0 if successful. */
+	 * Return 0 if successful.
+	 */
 	int (*get_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
 			u32 size);
 	/* Write configuration data given by size from the data buffer.
-	   Return 0 if successful. */
+	 * Return 0 if successful.
+	 */
 	int (*set_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
 			u32 size);
 	/* Get the status register, bit pattern given by:
@@ -193,11 +195,12 @@ struct config_registers {
  * hwicap_type_1_read - Generates a Type 1 read packet header.
  * @reg: is the address of the register to be read back.
  *
+ * Return:
  * Generates a Type 1 read packet header, which is used to indirectly
  * read registers in the configuration logic.  This packet must then
  * be sent through the icap device, and a return packet received with
  * the information.
- **/
+ */
 static inline u32 hwicap_type_1_read(u32 reg)
 {
 	return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
@@ -208,7 +211,9 @@ static inline u32 hwicap_type_1_read(u32 reg)
 /**
  * hwicap_type_1_write - Generates a Type 1 write packet header
  * @reg: is the address of the register to be read back.
- **/
+ *
+ * Return: Type 1 write packet header
+ */
 static inline u32 hwicap_type_1_write(u32 reg)
 {
 	return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |

+ 7 - 0
drivers/extcon/Kconfig

@@ -150,4 +150,11 @@ config EXTCON_USB_GPIO
 	  Say Y here to enable GPIO based USB cable detection extcon support.
 	  Used typically if GPIO is used for USB ID pin detection.
 
+config EXTCON_USBC_CROS_EC
+	tristate "ChromeOS Embedded Controller EXTCON support"
+	depends on MFD_CROS_EC
+	help
+	  Say Y here to enable USB Type C cable detection extcon support when
+	  using Chrome OS EC based USB Type-C ports.
+
 endif

+ 1 - 0
drivers/extcon/Makefile

@@ -20,3 +20,4 @@ obj-$(CONFIG_EXTCON_QCOM_SPMI_MISC) += extcon-qcom-spmi-misc.o
 obj-$(CONFIG_EXTCON_RT8973A)	+= extcon-rt8973a.o
 obj-$(CONFIG_EXTCON_SM5502)	+= extcon-sm5502.o
 obj-$(CONFIG_EXTCON_USB_GPIO)	+= extcon-usb-gpio.o
+obj-$(CONFIG_EXTCON_USBC_CROS_EC) += extcon-usbc-cros-ec.o

+ 24 - 26
drivers/extcon/devres.c

@@ -1,5 +1,5 @@
 /*
- *  drivers/extcon/devres.c - EXTCON device's resource management
+ * drivers/extcon/devres.c - EXTCON device's resource management
  *
  * Copyright (C) 2016 Samsung Electronics
  * Author: Chanwoo Choi <cw00.choi@samsung.com>
@@ -59,10 +59,9 @@ static void devm_extcon_dev_notifier_all_unreg(struct device *dev, void *res)
 
 /**
  * devm_extcon_dev_allocate - Allocate managed extcon device
- * @dev:		device owning the extcon device being created
- * @supported_cable:	Array of supported extcon ending with EXTCON_NONE.
- *			If supported_cable is NULL, cable name related APIs
- *			are disabled.
+ * @dev:		the device owning the extcon device being created
+ * @supported_cable:	the array of the supported external connectors
+ *			ending with EXTCON_NONE.
 *
 * This function manages automatically the memory of extcon device using device
 * resource management and simplify the control of freeing the memory of extcon
@@ -97,8 +96,8 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate);
 
 /**
  * devm_extcon_dev_free() - Resource-managed extcon_dev_unregister()
- * @dev:	device the extcon belongs to
- * @edev:	the extcon device to unregister
+ * @dev:	the device owning the extcon device being created
+ * @edev:	the extcon device to be freed
 *
 * Free the memory that is allocated with devm_extcon_dev_allocate()
 * function.
@@ -112,10 +111,9 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_free);
 
 /**
  * devm_extcon_dev_register() - Resource-managed extcon_dev_register()
- * @dev:	device to allocate extcon device
- * @edev:	the new extcon device to register
+ * @dev:	the device owning the extcon device being created
+ * @edev:	the extcon device to be registered
 *
- * Managed extcon_dev_register() function. If extcon device is attached with
 * this function, that extcon device is automatically unregistered on driver
 * detach. Internally this function calls extcon_dev_register() function.
 * To get more information, refer that function.
@@ -149,8 +147,8 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_register);
 
 /**
  * devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister()
- * @dev:	device the extcon belongs to
- * @edev:	the extcon device to unregister
+ * @dev:	the device owning the extcon device being created
+ * @edev:	the extcon device to unregistered
 *
 * Unregister extcon device that is registered with devm_extcon_dev_register()
 * function.
@@ -164,10 +162,10 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister);
 
 /**
  * devm_extcon_register_notifier() - Resource-managed extcon_register_notifier()
- * @dev:	device to allocate extcon device
- * @edev:	the extcon device that has the external connecotr.
- * @id:		the unique id of each external connector in extcon enumeration.
- * @nb:		a notifier block to be registered.
+ * @dev:	the device owning the extcon device being created
+ * @edev:	the extcon device
+ * @id:		the unique id among the extcon enumeration
+ * @nb:		a notifier block to be registered
 *
 * This function manages automatically the notifier of extcon device using
 * device resource management and simplify the control of unregistering
@@ -208,10 +206,10 @@ EXPORT_SYMBOL(devm_extcon_register_notifier);
 /**
  * devm_extcon_unregister_notifier()
 			- Resource-managed extcon_unregister_notifier()
- * @dev:	device to allocate extcon device
- * @edev:	the extcon device that has the external connecotr.
- * @id:		the unique id of each external connector in extcon enumeration.
- * @nb:		a notifier block to be registered.
+ * @dev:	the device owning the extcon device being created
+ * @edev:	the extcon device
+ * @id:		the unique id among the extcon enumeration
+ * @nb:		a notifier block to be registered
 */
 void devm_extcon_unregister_notifier(struct device *dev,
 				struct extcon_dev *edev, unsigned int id,
@@ -225,9 +223,9 @@ EXPORT_SYMBOL(devm_extcon_unregister_notifier);
 /**
  * devm_extcon_register_notifier_all()
  *		- Resource-managed extcon_register_notifier_all()
- * @dev:	device to allocate extcon device
- * @edev:	the extcon device that has the external connecotr.
- * @nb:		a notifier block to be registered.
+ * @dev:	the device owning the extcon device being created
+ * @edev:	the extcon device
+ * @nb:		a notifier block to be registered
 *
 * This function manages automatically the notifier of extcon device using
 * device resource management and simplify the control of unregistering
@@ -263,9 +261,9 @@ EXPORT_SYMBOL(devm_extcon_register_notifier_all);
 /**
  * devm_extcon_unregister_notifier_all()
  *		- Resource-managed extcon_unregister_notifier_all()
- * @dev:	device to allocate extcon device
- * @edev:	the extcon device that has the external connecotr.
- * @nb:		a notifier block to be registered.
+ * @dev:	the device owning the extcon device being created
+ * @edev:	the extcon device
+ * @nb:		a notifier block to be registered
 */
 void devm_extcon_unregister_notifier_all(struct device *dev,
 				struct extcon_dev *edev,
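
Taken together, the devres helpers documented above let a provider's probe() drop all extcon cleanup code. A hedged sketch of the usual pattern (the probe body and cable table are illustrative, not from this patch):

/* Hedged sketch of the devm_ extcon pattern: both the allocation and
 * the registration are undone automatically when the device unbinds,
 * so no remove() counterpart is needed. */
static const unsigned int example_cables[] = {
	EXTCON_USB,
	EXTCON_NONE,	/* table must end with EXTCON_NONE */
};

static int example_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;

	edev = devm_extcon_dev_allocate(&pdev->dev, example_cables);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	return devm_extcon_dev_register(&pdev->dev, edev);
}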

+ 1 - 1
drivers/extcon/extcon-intel-int3496.c

@@ -171,7 +171,7 @@ static int int3496_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static struct acpi_device_id int3496_acpi_match[] = {
+static const struct acpi_device_id int3496_acpi_match[] = {
 	{ "INT3496" },
 	{ }
 };

+ 2 - 3
drivers/extcon/extcon-max77693.c

@@ -811,9 +811,8 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
 			 */
 			extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
 						attached);
-			if (!cable_attached)
-				extcon_set_state_sync(info->edev,
-					EXTCON_DISP_MHL, cable_attached);
+			extcon_set_state_sync(info->edev, EXTCON_DISP_MHL,
+						cable_attached);
 			break;
 		}
 

+ 417 - 0
drivers/extcon/extcon-usbc-cros-ec.c

@@ -0,0 +1,417 @@
+/**
+ * drivers/extcon/extcon-usbc-cros-ec - ChromeOS Embedded Controller extcon
+ *
+ * Copyright (C) 2017 Google, Inc
+ * Author: Benson Leung <bleung@chromium.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/extcon.h>
+#include <linux/kernel.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+struct cros_ec_extcon_info {
+	struct device *dev;
+	struct extcon_dev *edev;
+
+	int port_id;
+
+	struct cros_ec_device *ec;
+
+	struct notifier_block notifier;
+
+	bool dp; /* DisplayPort enabled */
+	bool mux; /* SuperSpeed (usb3) enabled */
+	unsigned int power_type;
+};
+
+static const unsigned int usb_type_c_cable[] = {
+	EXTCON_DISP_DP,
+	EXTCON_NONE,
+};
+
+/**
+ * cros_ec_pd_command() - Send a command to the EC.
+ * @info: pointer to struct cros_ec_extcon_info
+ * @command: EC command
+ * @version: EC command version
+ * @outdata: EC command output data
+ * @outsize: Size of outdata
+ * @indata: EC command input data
+ * @insize: Size of indata
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+static int cros_ec_pd_command(struct cros_ec_extcon_info *info,
+			      unsigned int command,
+			      unsigned int version,
+			      void *outdata,
+			      unsigned int outsize,
+			      void *indata,
+			      unsigned int insize)
+{
+	struct cros_ec_command *msg;
+	int ret;
+
+	msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	msg->version = version;
+	msg->command = command;
+	msg->outsize = outsize;
+	msg->insize = insize;
+
+	if (outsize)
+		memcpy(msg->data, outdata, outsize);
+
+	ret = cros_ec_cmd_xfer_status(info->ec, msg);
+	if (ret >= 0 && insize)
+		memcpy(indata, msg->data, insize);
+
+	kfree(msg);
+	return ret;
+}
+
+/**
+ * cros_ec_usb_get_power_type() - Get power type info about PD device attached
+ * to given port.
+ * @info: pointer to struct cros_ec_extcon_info
+ *
+ * Return: power type on success, <0 on failure.
+ */
+static int cros_ec_usb_get_power_type(struct cros_ec_extcon_info *info)
+{
+	struct ec_params_usb_pd_power_info req;
+	struct ec_response_usb_pd_power_info resp;
+	int ret;
+
+	req.port = info->port_id;
+	ret = cros_ec_pd_command(info, EC_CMD_USB_PD_POWER_INFO, 0,
+				 &req, sizeof(req), &resp, sizeof(resp));
+	if (ret < 0)
+		return ret;
+
+	return resp.type;
+}
+
+/**
+ * cros_ec_usb_get_pd_mux_state() - Get PD mux state for given port.
+ * @info: pointer to struct cros_ec_extcon_info
+ *
+ * Return: PD mux state on success, <0 on failure.
+ */
+static int cros_ec_usb_get_pd_mux_state(struct cros_ec_extcon_info *info)
+{
+	struct ec_params_usb_pd_mux_info req;
+	struct ec_response_usb_pd_mux_info resp;
+	int ret;
+
+	req.port = info->port_id;
+	ret = cros_ec_pd_command(info, EC_CMD_USB_PD_MUX_INFO, 0,
+				 &req, sizeof(req),
+				 &resp, sizeof(resp));
+	if (ret < 0)
+		return ret;
+
+	return resp.flags;
+}
+
+/**
+ * cros_ec_usb_get_role() - Get role info about possible PD device attached to a
+ * given port.
+ * @info: pointer to struct cros_ec_extcon_info
+ * @polarity: pointer to cable polarity (return value)
+ *
+ * Return: role info on success, -ENOTCONN if no cable is connected, <0 on
+ * failure.
+ */
+static int cros_ec_usb_get_role(struct cros_ec_extcon_info *info,
+				bool *polarity)
+{
+	struct ec_params_usb_pd_control pd_control;
+	struct ec_response_usb_pd_control_v1 resp;
+	int ret;
+
+	pd_control.port = info->port_id;
+	pd_control.role = USB_PD_CTRL_ROLE_NO_CHANGE;
+	pd_control.mux = USB_PD_CTRL_MUX_NO_CHANGE;
+	ret = cros_ec_pd_command(info, EC_CMD_USB_PD_CONTROL, 1,
+				 &pd_control, sizeof(pd_control),
+				 &resp, sizeof(resp));
+	if (ret < 0)
+		return ret;
+
+	if (!(resp.enabled & PD_CTRL_RESP_ENABLED_CONNECTED))
+		return -ENOTCONN;
+
+	*polarity = resp.polarity;
+
+	return resp.role;
+}
+
+/**
+ * cros_ec_pd_get_num_ports() - Get number of EC charge ports.
+ * @info: pointer to struct cros_ec_extcon_info
+ *
+ * Return: number of ports on success, <0 on failure.
+ */
+static int cros_ec_pd_get_num_ports(struct cros_ec_extcon_info *info)
+{
+	struct ec_response_usb_pd_ports resp;
+	int ret;
+
+	ret = cros_ec_pd_command(info, EC_CMD_USB_PD_PORTS,
+				 0, NULL, 0, &resp, sizeof(resp));
+	if (ret < 0)
+		return ret;
+
+	return resp.num_ports;
+}
+
+static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
+				       bool force)
+{
+	struct device *dev = info->dev;
+	int role, power_type;
+	bool polarity = false;
+	bool dp = false;
+	bool mux = false;
+	bool hpd = false;
+
+	power_type = cros_ec_usb_get_power_type(info);
+	if (power_type < 0) {
+		dev_err(dev, "failed getting power type err = %d\n",
+			power_type);
+		return power_type;
+	}
+
+	role = cros_ec_usb_get_role(info, &polarity);
+	if (role < 0) {
+		if (role != -ENOTCONN) {
+			dev_err(dev, "failed getting role err = %d\n", role);
+			return role;
+		}
+	} else {
+		int pd_mux_state;
+
+		pd_mux_state = cros_ec_usb_get_pd_mux_state(info);
+		if (pd_mux_state < 0)
+			pd_mux_state = USB_PD_MUX_USB_ENABLED;
+
+		dp = pd_mux_state & USB_PD_MUX_DP_ENABLED;
+		mux = pd_mux_state & USB_PD_MUX_USB_ENABLED;
+		hpd = pd_mux_state & USB_PD_MUX_HPD_IRQ;
+	}
+
+	if (force || info->dp != dp || info->mux != mux ||
+		info->power_type != power_type) {
+
+		info->dp = dp;
+		info->mux = mux;
+		info->power_type = power_type;
+
+		extcon_set_state(info->edev, EXTCON_DISP_DP, dp);
+
+		extcon_set_property(info->edev, EXTCON_DISP_DP,
+				    EXTCON_PROP_USB_TYPEC_POLARITY,
+				    (union extcon_property_value)(int)polarity);
+		extcon_set_property(info->edev, EXTCON_DISP_DP,
+				    EXTCON_PROP_USB_SS,
+				    (union extcon_property_value)(int)mux);
+		extcon_set_property(info->edev, EXTCON_DISP_DP,
+				    EXTCON_PROP_DISP_HPD,
+				    (union extcon_property_value)(int)hpd);
+
+		extcon_sync(info->edev, EXTCON_DISP_DP);
+
+	} else if (hpd) {
+		extcon_set_property(info->edev, EXTCON_DISP_DP,
+				    EXTCON_PROP_DISP_HPD,
+				    (union extcon_property_value)(int)hpd);
+		extcon_sync(info->edev, EXTCON_DISP_DP);
+	}
+
+	return 0;
+}
+
+static int extcon_cros_ec_event(struct notifier_block *nb,
+				unsigned long queued_during_suspend,
+				void *_notify)
+{
+	struct cros_ec_extcon_info *info;
+	struct cros_ec_device *ec;
+	u32 host_event;
+
+	info = container_of(nb, struct cros_ec_extcon_info, notifier);
+	ec = info->ec;
+
+	host_event = cros_ec_get_host_event(ec);
+	if (host_event & (EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU) |
+			  EC_HOST_EVENT_MASK(EC_HOST_EVENT_USB_MUX))) {
+		extcon_cros_ec_detect_cable(info, false);
+		return NOTIFY_OK;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int extcon_cros_ec_probe(struct platform_device *pdev)
+{
+	struct cros_ec_extcon_info *info;
+	struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	int numports, ret;
+
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->dev = dev;
+	info->ec = ec;
+
+	if (np) {
+		u32 port;
+
+		ret = of_property_read_u32(np, "google,usb-port-id", &port);
+		if (ret < 0) {
+			dev_err(dev, "Missing google,usb-port-id property\n");
+			return ret;
+		}
+		info->port_id = port;
+	} else {
+		info->port_id = pdev->id;
+	}
+
+	numports = cros_ec_pd_get_num_ports(info);
+	if (numports < 0) {
+		dev_err(dev, "failed getting number of ports! ret = %d\n",
+			numports);
+		return numports;
+	}
+
+	if (info->port_id >= numports) {
+		dev_err(dev, "This system only supports %d ports\n", numports);
+		return -ENODEV;
+	}
+
+	info->edev = devm_extcon_dev_allocate(dev, usb_type_c_cable);
+	if (IS_ERR(info->edev)) {
+		dev_err(dev, "failed to allocate extcon device\n");
+		return -ENOMEM;
+	}
+
+	ret = devm_extcon_dev_register(dev, info->edev);
+	if (ret < 0) {
+		dev_err(dev, "failed to register extcon device\n");
+		return ret;
+	}
+
+	extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
+				       EXTCON_PROP_USB_TYPEC_POLARITY);
+	extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
+				       EXTCON_PROP_USB_SS);
+	extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
+				       EXTCON_PROP_DISP_HPD);
+
+	platform_set_drvdata(pdev, info);
+
+	/* Get PD events from the EC */
+	info->notifier.notifier_call = extcon_cros_ec_event;
+	ret = blocking_notifier_chain_register(&info->ec->event_notifier,
+					       &info->notifier);
+	if (ret < 0) {
+		dev_err(dev, "failed to register notifier\n");
+		return ret;
+	}
+
+	/* Perform initial detection */
+	ret = extcon_cros_ec_detect_cable(info, true);
+	if (ret < 0) {
+		dev_err(dev, "failed to detect initial cable state\n");
+		goto unregister_notifier;
+	}
+
+	return 0;
+
+unregister_notifier:
+	blocking_notifier_chain_unregister(&info->ec->event_notifier,
+					   &info->notifier);
+	return ret;
+}
+
+static int extcon_cros_ec_remove(struct platform_device *pdev)
+{
+	struct cros_ec_extcon_info *info = platform_get_drvdata(pdev);
+
+	blocking_notifier_chain_unregister(&info->ec->event_notifier,
+					   &info->notifier);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int extcon_cros_ec_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int extcon_cros_ec_resume(struct device *dev)
+{
+	int ret;
+	struct cros_ec_extcon_info *info = dev_get_drvdata(dev);
+
+	ret = extcon_cros_ec_detect_cable(info, true);
+	if (ret < 0)
+		dev_err(dev, "failed to detect cable state on resume\n");
+
+	return 0;
+}
+
+static const struct dev_pm_ops extcon_cros_ec_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(extcon_cros_ec_suspend, extcon_cros_ec_resume)
+};
+
+#define DEV_PM_OPS	(&extcon_cros_ec_dev_pm_ops)
+#else
+#define DEV_PM_OPS	NULL
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_OF
+static const struct of_device_id extcon_cros_ec_of_match[] = {
+	{ .compatible = "google,extcon-usbc-cros-ec" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, extcon_cros_ec_of_match);
+#endif /* CONFIG_OF */
+
+static struct platform_driver extcon_cros_ec_driver = {
+	.driver = {
+		.name  = "extcon-usbc-cros-ec",
+		.of_match_table = of_match_ptr(extcon_cros_ec_of_match),
+		.pm = DEV_PM_OPS,
+	},
+	.remove  = extcon_cros_ec_remove,
+	.probe   = extcon_cros_ec_probe,
+};
+
+module_platform_driver(extcon_cros_ec_driver);
+
+MODULE_DESCRIPTION("ChromeOS Embedded Controller extcon driver");
+MODULE_AUTHOR("Benson Leung <bleung@chromium.org>");
+MODULE_LICENSE("GPL");
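
On the consumer side, a display driver can watch the EXTCON_DISP_DP state this driver publishes. A hedged sketch using the notifier helpers from devres.c above (the consumer-side names are illustrative, not part of this patch):

/* Hedged consumer sketch: subscribe to the DP cable state published
 * by extcon-usbc-cros-ec. extcon_get_state() returns > 0 while the
 * connector is attached. */
static int dp_cable_notifier(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	/* re-read state and properties here, e.g. EXTCON_PROP_DISP_HPD */
	return NOTIFY_OK;
}

static int dp_bind_to_extcon(struct device *dev, struct extcon_dev *edev,
			     struct notifier_block *nb)
{
	nb->notifier_call = dp_cable_notifier;
	/* unregistered automatically on driver detach */
	return devm_extcon_register_notifier(dev, edev, EXTCON_DISP_DP, nb);
}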

+ 136 - 143
drivers/extcon/extcon.c

@@ -1,7 +1,5 @@
 /*
- *  drivers/extcon/extcon.c - External Connector (extcon) framework.
- *
- *  External connector (extcon) class driver
+ * drivers/extcon/extcon.c - External Connector (extcon) framework.
 *
 * Copyright (C) 2015 Samsung Electronics
 * Author: Chanwoo Choi <cw00.choi@samsung.com>
@@ -37,7 +35,6 @@
 #include "extcon.h"
 #include "extcon.h"
 
 
 #define SUPPORTED_CABLE_MAX	32
 #define SUPPORTED_CABLE_MAX	32
-#define CABLE_NAME_MAX		30
 
 
 struct __extcon_info {
 struct __extcon_info {
 	unsigned int type;
 	unsigned int type;
@@ -200,13 +197,13 @@ struct __extcon_info {
 };
 
 /**
- * struct extcon_cable - An internal data for each cable of extcon device.
- * @edev:		The extcon device
- * @cable_index:	Index of this cable in the edev
- * @attr_g:		Attribute group for the cable
+ * struct extcon_cable - An internal data for an external connector.
+ * @edev:		the extcon device
+ * @cable_index:	the index of this cable in the edev
+ * @attr_g:		the attribute group for the cable
  * @attr_name:		"name" sysfs entry
  * @attr_state:		"state" sysfs entry
- * @attrs:		Array pointing to attr_name and attr_state for attr_g
+ * @attrs:		the array pointing to attr_name and attr_state for attr_g
 */
 struct extcon_cable {
 	struct extcon_dev *edev;
@@ -234,15 +231,6 @@ static struct class *extcon_class;
 static LIST_HEAD(extcon_dev_list);
 static DEFINE_MUTEX(extcon_dev_list_lock);
 
-/**
- * check_mutually_exclusive - Check if new_state violates mutually_exclusive
- *			      condition.
- * @edev:	the extcon device
- * @new_state:	new cable attach status for @edev
- *
- * Returns 0 if nothing violates. Returns the index + 1 for the first
- * violated condition.
- */
 static int check_mutually_exclusive(struct extcon_dev *edev, u32 new_state)
 {
 	int i = 0;
@@ -417,11 +405,13 @@ static ssize_t cable_state_show(struct device *dev,
 }
 
 /**
- * extcon_sync()	- Synchronize the states for both the attached/detached
- * @edev:		the extcon device that has the cable.
+ * extcon_sync() - Synchronize the state for an external connector.
+ * @edev:	the extcon device
+ *
+ * Note that this function send a notification in order to synchronize
+ * the state and property of an external connector.
 *
- * This function send a notification to synchronize the all states of a
- * specific external connector
+ * Returns 0 if success or error number if fail.
 */
 int extcon_sync(struct extcon_dev *edev, unsigned int id)
 {
@@ -497,9 +487,11 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
 EXPORT_SYMBOL_GPL(extcon_sync);
 
 /**
- * extcon_get_state() - Get the state of a external connector.
- * @edev:	the extcon device that has the cable.
- * @id:		the unique id of each external connector in extcon enumeration.
+ * extcon_get_state() - Get the state of an external connector.
+ * @edev:	the extcon device
+ * @id:		the unique id indicating an external connector
+ *
+ * Returns 0 if success or error number if fail.
 */
 int extcon_get_state(struct extcon_dev *edev, const unsigned int id)
 {
@@ -522,20 +514,19 @@ int extcon_get_state(struct extcon_dev *edev, const unsigned int id)
 EXPORT_SYMBOL_GPL(extcon_get_state);
 
 /**
- * extcon_set_state() - Set the state of a external connector.
- *			without a notification.
- * @edev:		the extcon device that has the cable.
- * @id:			the unique id of each external connector
- *			in extcon enumeration.
- * @state:		the new cable status. The default semantics is
- *			true: attached / false: detached.
+ * extcon_set_state() - Set the state of an external connector.
+ * @edev:	the extcon device
+ * @id:		the unique id indicating an external connector
+ * @state:	the new state of an external connector.
+ *		the default semantics is true: attached / false: detached.
+ *
+ * Note that this function set the state of an external connector without
+ * a notification. To synchronize the state of an external connector,
+ * have to use extcon_set_state_sync() and extcon_sync().
 *
- * This function only set the state of a external connector without
- * a notification. To synchronize the data of a external connector,
- * use extcon_set_state_sync() and extcon_sync().
+ * Returns 0 if success or error number if fail.
 */
-int extcon_set_state(struct extcon_dev *edev, unsigned int id,
-				bool cable_state)
+int extcon_set_state(struct extcon_dev *edev, unsigned int id, bool state)
 {
 	unsigned long flags;
 	int index, ret = 0;
@@ -550,11 +541,11 @@ int extcon_set_state(struct extcon_dev *edev, unsigned int id,
 	spin_lock_irqsave(&edev->lock, flags);
 
 	/* Check whether the external connector's state is changed. */
-	if (!is_extcon_changed(edev, index, cable_state))
+	if (!is_extcon_changed(edev, index, state))
 		goto out;
 
 	if (check_mutually_exclusive(edev,
-		(edev->state & ~BIT(index)) | (cable_state & BIT(index)))) {
+		(edev->state & ~BIT(index)) | (state & BIT(index)))) {
 		ret = -EPERM;
 		goto out;
 	}
@@ -563,11 +554,11 @@ int extcon_set_state(struct extcon_dev *edev, unsigned int id,
 	 * Initialize the value of extcon property before setting
 	 * the detached state for an external connector.
 	 */
-	if (!cable_state)
+	if (!state)
 		init_property(edev, id, index);
 
-	/* Update the state for a external connector. */
-	if (cable_state)
+	/* Update the state for an external connector. */
+	if (state)
 		edev->state |= BIT(index);
 	else
 		edev->state &= ~(BIT(index));
@@ -579,19 +570,18 @@ out:
 EXPORT_SYMBOL_GPL(extcon_set_state);
 
 /**
- * extcon_set_state_sync() - Set the state of a external connector
- *			with a notification.
- * @edev:		the extcon device that has the cable.
- * @id:			the unique id of each external connector
- *			in extcon enumeration.
- * @state:		the new cable status. The default semantics is
- *			true: attached / false: detached.
+ * extcon_set_state_sync() - Set the state of an external connector with sync.
+ * @edev:	the extcon device
+ * @id:		the unique id indicating an external connector
+ * @state:	the new state of external connector.
+ *		the default semantics is true: attached / false: detached.
+ *
+ * Note that this function set the state of external connector
+ * and synchronize the state by sending a notification.
 *
- * This function set the state of external connector and synchronize the data
- * by usning a notification.
+ * Returns 0 if success or error number if fail.
 */
-int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
-				bool cable_state)
+int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, bool state)
 {
 	int ret, index;
 	unsigned long flags;
@@ -602,12 +592,12 @@ int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
 
 	/* Check whether the external connector's state is changed. */
 	spin_lock_irqsave(&edev->lock, flags);
-	ret = is_extcon_changed(edev, index, cable_state);
+	ret = is_extcon_changed(edev, index, state);
 	spin_unlock_irqrestore(&edev->lock, flags);
 	if (!ret)
 		return 0;
 
-	ret = extcon_set_state(edev, id, cable_state);
+	ret = extcon_set_state(edev, id, state);
 	if (ret < 0)
 		return ret;
 
@@ -616,19 +606,18 @@ int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
 EXPORT_SYMBOL_GPL(extcon_set_state_sync);
 EXPORT_SYMBOL_GPL(extcon_set_state_sync);
 
 
 /**
 /**
- * extcon_get_property() - Get the property value of a specific cable.
- * @edev:		the extcon device that has the cable.
- * @id:			the unique id of each external connector
- *			in extcon enumeration.
- * @prop:		the property id among enum extcon_property.
- * @prop_val:		the pointer which store the value of property.
+ * extcon_get_property() - Get the property value of an external connector.
+ * @edev:	the extcon device
+ * @id:		the unique id indicating an external connector
+ * @prop:	the property id indicating an extcon property
+ * @prop_val:	the pointer which store the value of extcon property
  *
  *
- * When getting the property value of external connector, the external connector
- * should be attached. If detached state, function just return 0 without
- * property value. Also, the each property should be included in the list of
- * supported properties according to the type of external connectors.
+ * Note that when getting the property value of external connector,
+ * the external connector should be attached. If detached state, function
+ * return 0 without property value. Also, the each property should be
+ * included in the list of supported properties according to extcon type.
  *
  *
- * Returns 0 if success or error number if fail
+ * Returns 0 if success or error number if fail.
  */
  */
 int extcon_get_property(struct extcon_dev *edev, unsigned int id,
 int extcon_get_property(struct extcon_dev *edev, unsigned int id,
 				unsigned int prop,
 				unsigned int prop,
@@ -698,17 +687,16 @@ int extcon_get_property(struct extcon_dev *edev, unsigned int id,
 EXPORT_SYMBOL_GPL(extcon_get_property);
 EXPORT_SYMBOL_GPL(extcon_get_property);
 
 
 /**
 /**
- * extcon_set_property() - Set the property value of a specific cable.
- * @edev:		the extcon device that has the cable.
- * @id:			the unique id of each external connector
- *			in extcon enumeration.
- * @prop:		the property id among enum extcon_property.
- * @prop_val:		the pointer including the new value of property.
+ * extcon_set_property() - Set the property value of an external connector.
+ * @edev:	the extcon device
+ * @id:		the unique id indicating an external connector
+ * @prop:	the property id indicating an extcon property
+ * @prop_val:	the pointer including the new value of extcon property
  *
  *
- * The each property should be included in the list of supported properties
- * according to the type of external connectors.
+ * Note that each property should be included in the list of supported
+ * properties according to the extcon type.
  *
  *
- * Returns 0 if success or error number if fail
+ * Returns 0 if success or error number if fail.
  */
  */
 int extcon_set_property(struct extcon_dev *edev, unsigned int id,
 int extcon_set_property(struct extcon_dev *edev, unsigned int id,
 				unsigned int prop,
 				unsigned int prop,
@@ -766,15 +754,14 @@ int extcon_set_property(struct extcon_dev *edev, unsigned int id,
 EXPORT_SYMBOL_GPL(extcon_set_property);
 EXPORT_SYMBOL_GPL(extcon_set_property);
 
 
 /**
 /**
- * extcon_set_property_sync() - Set the property value of a specific cable
-			with a notification.
- * @prop_val:		the pointer including the new value of property.
+ * extcon_set_property_sync() - Set property of an external connector with sync.
+ * @prop_val:	the pointer including the new value of extcon property
  *
  *
- * When setting the property value of external connector, the external connector
- * should be attached. The each property should be included in the list of
- * supported properties according to the type of external connectors.
+ * Note that when setting the property value of external connector,
+ * the external connector should be attached. The each property should
+ * be included in the list of supported properties according to extcon type.
  *
  *
- * Returns 0 if success or error number if fail
+ * Returns 0 if success or error number if fail.
  */
  */
 int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
 int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
 				unsigned int prop,
 				unsigned int prop,
@@ -791,12 +778,11 @@ int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
 EXPORT_SYMBOL_GPL(extcon_set_property_sync);
 EXPORT_SYMBOL_GPL(extcon_set_property_sync);
 
 
 /**
 /**
- * extcon_get_property_capability() - Get the capability of property
- *			of an external connector.
- * @edev:		the extcon device that has the cable.
- * @id:			the unique id of each external connector
- *			in extcon enumeration.
- * @prop:		the property id among enum extcon_property.
+ * extcon_get_property_capability() - Get the capability of the property
+ *					for an external connector.
+ * @edev:	the extcon device
+ * @id:		the unique id indicating an external connector
+ * @prop:	the property id indicating an extcon property
  *
  *
  * Returns 1 if the property is available or 0 if not available.
  * Returns 1 if the property is available or 0 if not available.
  */
  */
@@ -822,18 +808,17 @@ int extcon_get_property_capability(struct extcon_dev *edev, unsigned int id,
 EXPORT_SYMBOL_GPL(extcon_get_property_capability);
 EXPORT_SYMBOL_GPL(extcon_get_property_capability);
 
 
 /**
 /**
- * extcon_set_property_capability() - Set the capability of a property
- *			of an external connector.
- * @edev:		the extcon device that has the cable.
- * @id:			the unique id of each external connector
- *			in extcon enumeration.
- * @prop:		the property id among enum extcon_property.
+ * extcon_set_property_capability() - Set the capability of the property
+ *					for an external connector.
+ * @edev:	the extcon device
+ * @id:		the unique id indicating an external connector
+ * @prop:	the property id indicating an extcon property
  *
  *
- * This function set the capability of a property for an external connector
- * to mark the bit in capability bitmap which mean the available state of
- * a property.
+ * Note that this function set the capability of the property
+ * for an external connector in order to mark the bit in capability
+ * bitmap which mean the available state of the property.
  *
  *
- * Returns 0 if success or error number if fail
+ * Returns 0 if success or error number if fail.
  */
  */
 int extcon_set_property_capability(struct extcon_dev *edev, unsigned int id,
 int extcon_set_property_capability(struct extcon_dev *edev, unsigned int id,
 					unsigned int prop)
 					unsigned int prop)
@@ -881,8 +866,10 @@ int extcon_set_property_capability(struct extcon_dev *edev, unsigned int id,
 EXPORT_SYMBOL_GPL(extcon_set_property_capability);
 EXPORT_SYMBOL_GPL(extcon_set_property_capability);
 
 
 /**
 /**
- * extcon_get_extcon_dev() - Get the extcon device instance from the name
- * @extcon_name:	The extcon name provided with extcon_dev_register()
+ * extcon_get_extcon_dev() - Get the extcon device instance from the name.
+ * @extcon_name:	the extcon name provided with extcon_dev_register()
+ *
+ * Return the pointer of extcon device if success or ERR_PTR(err) if fail.
  */
  */
 struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
 struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
 {
 {
@@ -904,15 +891,17 @@ out:
 EXPORT_SYMBOL_GPL(extcon_get_extcon_dev);
 EXPORT_SYMBOL_GPL(extcon_get_extcon_dev);
 
 
 /**
 /**
- * extcon_register_notifier() - Register a notifiee to get notified by
- *				any attach status changes from the extcon.
- * @edev:	the extcon device that has the external connecotr.
- * @id:		the unique id of each external connector in extcon enumeration.
- * @nb:		a notifier block to be registered.
+ * extcon_register_notifier() - Register a notifier block to get notified by
+ *				any state changes from the extcon.
+ * @edev:	the extcon device
+ * @id:		the unique id indicating an external connector
+ * @nb:		a notifier block to be registered
  *
  *
  * Note that the second parameter given to the callback of nb (val) is
  * Note that the second parameter given to the callback of nb (val) is
- * "old_state", not the current state. The current state can be retrieved
- * by looking at the third pameter (edev pointer)'s state value.
+ * the current state of an external connector and the third pameter
+ * is the pointer of extcon device.
+ *
+ * Returns 0 if success or error number if fail.
  */
  */
 int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
 int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
 			     struct notifier_block *nb)
 			     struct notifier_block *nb)
@@ -936,10 +925,12 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
 EXPORT_SYMBOL_GPL(extcon_register_notifier);
 EXPORT_SYMBOL_GPL(extcon_register_notifier);
 
 
 /**
 /**
- * extcon_unregister_notifier() - Unregister a notifiee from the extcon device.
- * @edev:	the extcon device that has the external connecotr.
- * @id:		the unique id of each external connector in extcon enumeration.
- * @nb:		a notifier block to be registered.
+ * extcon_unregister_notifier() - Unregister a notifier block from the extcon.
+ * @edev:	the extcon device
+ * @id:		the unique id indicating an external connector
+ * @nb:		a notifier block to be registered
+ *
+ * Returns 0 if success or error number if fail.
  */
  */
 int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
 int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
 				struct notifier_block *nb)
 				struct notifier_block *nb)
@@ -963,16 +954,16 @@ int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
 EXPORT_SYMBOL_GPL(extcon_unregister_notifier);
 EXPORT_SYMBOL_GPL(extcon_unregister_notifier);
 
 
 /**
 /**
- * extcon_register_notifier_all() - Register a notifier block for all connectors
- * @edev:	the extcon device that has the external connector.
- * @nb:		a notifier block to be registered.
+ * extcon_register_notifier_all() - Register a notifier block for all connectors.
+ * @edev:	the extcon device
+ * @nb:		a notifier block to be registered
  *
  *
- * This function registers a notifier block in order to receive the state
- * change of all supported external connectors from extcon device.
+ * Note that this function registers a notifier block in order to receive
+ * the state change of all supported external connectors from extcon device.
  * And the second parameter given to the callback of nb (val) is
  * And the second parameter given to the callback of nb (val) is
- * the current state and third parameter is the edev pointer.
+ * the current state and the third pameter is the pointer of extcon device.
  *
  *
- * Returns 0 if success or error number if fail
+ * Returns 0 if success or error number if fail.
  */
  */
 int extcon_register_notifier_all(struct extcon_dev *edev,
 int extcon_register_notifier_all(struct extcon_dev *edev,
 				struct notifier_block *nb)
 				struct notifier_block *nb)
@@ -993,10 +984,10 @@ EXPORT_SYMBOL_GPL(extcon_register_notifier_all);
 
 
 /**
 /**
  * extcon_unregister_notifier_all() - Unregister a notifier block from extcon.
  * extcon_unregister_notifier_all() - Unregister a notifier block from extcon.
- * @edev:	the extcon device that has the external connecotr.
- * @nb:		a notifier block to be registered.
+ * @edev:	the extcon device
+ * @nb:		a notifier block to be registered
  *
  *
- * Returns 0 if success or error number if fail
+ * Returns 0 if success or error number if fail.
  */
  */
 int extcon_unregister_notifier_all(struct extcon_dev *edev,
 int extcon_unregister_notifier_all(struct extcon_dev *edev,
 				struct notifier_block *nb)
 				struct notifier_block *nb)
@@ -1045,15 +1036,14 @@ static void dummy_sysfs_dev_release(struct device *dev)
 
 
 /*
 /*
  * extcon_dev_allocate() - Allocate the memory of extcon device.
  * extcon_dev_allocate() - Allocate the memory of extcon device.
- * @supported_cable:	Array of supported extcon ending with EXTCON_NONE.
- *			If supported_cable is NULL, cable name related APIs
- *			are disabled.
+ * @supported_cable:	the array of the supported external connectors
+ *			ending with EXTCON_NONE.
  *
  *
- * This function allocates the memory for extcon device without allocating
- * memory in each extcon provider driver and initialize default setting for
- * extcon device.
+ * Note that this function allocates the memory for extcon device 
+ * and initialize default setting for the extcon device.
  *
  *
- * Return the pointer of extcon device if success or ERR_PTR(err) if fail
+ * Returns the pointer memory of allocated extcon_dev if success
+ * or ERR_PTR(err) if fail.
  */
  */
 struct extcon_dev *extcon_dev_allocate(const unsigned int *supported_cable)
 struct extcon_dev *extcon_dev_allocate(const unsigned int *supported_cable)
 {
 {
@@ -1074,7 +1064,7 @@ struct extcon_dev *extcon_dev_allocate(const unsigned int *supported_cable)
 
 
 /*
 /*
  * extcon_dev_free() - Free the memory of extcon device.
  * extcon_dev_free() - Free the memory of extcon device.
- * @edev:	the extcon device to free
+ * @edev:	the extcon device
  */
  */
 void extcon_dev_free(struct extcon_dev *edev)
 void extcon_dev_free(struct extcon_dev *edev)
 {
 {
@@ -1083,13 +1073,18 @@ void extcon_dev_free(struct extcon_dev *edev)
 EXPORT_SYMBOL_GPL(extcon_dev_free);
 EXPORT_SYMBOL_GPL(extcon_dev_free);
 
 
 /**
 /**
- * extcon_dev_register() - Register a new extcon device
- * @edev	: the new extcon device (should be allocated before calling)
+ * extcon_dev_register() - Register an new extcon device
+ * @edev:	the extcon device to be registered
  *
  *
  * Among the members of edev struct, please set the "user initializing data"
  * Among the members of edev struct, please set the "user initializing data"
- * in any case and set the "optional callbacks" if required. However, please
  * do not set the values of "internal data", which are initialized by
  * do not set the values of "internal data", which are initialized by
  * this function.
  * this function.
+ *
+ * Note that before calling this funciton, have to allocate the memory
+ * of an extcon device by using the extcon_dev_allocate(). And the extcon
+ * dev should include the supported_cable information.
+ *
+ * Returns 0 if success or error number if fail.
  */
  */
 int extcon_dev_register(struct extcon_dev *edev)
 int extcon_dev_register(struct extcon_dev *edev)
 {
 {
@@ -1296,7 +1291,7 @@ EXPORT_SYMBOL_GPL(extcon_dev_register);
 
 
 /**
 /**
  * extcon_dev_unregister() - Unregister the extcon device.
  * extcon_dev_unregister() - Unregister the extcon device.
- * @edev:	the extcon device instance to be unregistered.
+ * @edev:	the extcon device to be unregistered.
  *
  *
  * Note that this does not call kfree(edev) because edev was not allocated
  * Note that this does not call kfree(edev) because edev was not allocated
  * by this class.
  * by this class.
@@ -1342,11 +1337,11 @@ EXPORT_SYMBOL_GPL(extcon_dev_unregister);
 
 
 #ifdef CONFIG_OF
 #ifdef CONFIG_OF
 /*
 /*
- * extcon_get_edev_by_phandle - Get the extcon device from devicetree
- * @dev - instance to the given device
- * @index - index into list of extcon_dev
+ * extcon_get_edev_by_phandle - Get the extcon device from devicetree.
+ * @dev		: the instance to the given device
+ * @index	: the index into list of extcon_dev
  *
  *
- * return the instance of extcon device
+ * Return the pointer of extcon device if success or ERR_PTR(err) if fail.
  */
  */
 struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
 struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
 {
 {
@@ -1363,8 +1358,8 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
 
 
 	node = of_parse_phandle(dev->of_node, "extcon", index);
 	node = of_parse_phandle(dev->of_node, "extcon", index);
 	if (!node) {
 	if (!node) {
-		dev_dbg(dev, "failed to get phandle in %s node\n",
-			dev->of_node->full_name);
+		dev_dbg(dev, "failed to get phandle in %pOF node\n",
+			dev->of_node);
 		return ERR_PTR(-ENODEV);
 		return ERR_PTR(-ENODEV);
 	}
 	}
 
 
@@ -1411,8 +1406,6 @@ static void __exit extcon_class_exit(void)
 module_exit(extcon_class_exit);
 module_exit(extcon_class_exit);
 
 
 MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
 MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
-MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
-MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
 MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
 MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
-MODULE_DESCRIPTION("External connector (extcon) class driver");
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("External Connector (extcon) framework");
+MODULE_LICENSE("GPL v2");
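
A minimal consumer sketch of the notifier API documented above. This is
illustrative only, assuming a consumer device with an "extcon" phandle;
the example_* names are hypothetical, while the calls and the callback
semantics (val is the current state, the third argument is the edev
pointer) come from the hunks above.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/notifier.h>

/* Per the kernel-doc above: state is the current connector state and
 * data is the extcon device pointer. */
static int example_usb_evt(struct notifier_block *nb, unsigned long state,
			   void *data)
{
	pr_info("EXTCON_USB is now %s\n", state ? "attached" : "detached");
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_usb_evt,
};

static int example_bind(struct device *dev)
{
	/* index 0: first entry in the consumer's "extcon" phandle list */
	struct extcon_dev *edev = extcon_get_edev_by_phandle(dev, 0);

	if (IS_ERR(edev))
		return PTR_ERR(edev);

	return extcon_register_notifier(edev, EXTCON_USB, &example_nb);
}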

+ 5 - 5
drivers/firmware/google/vpd.c

@@ -202,7 +202,7 @@ static int vpd_section_init(const char *name, struct vpd_section *sec,
 	sec->raw_name = kasprintf(GFP_KERNEL, "%s_raw", name);
 	if (!sec->raw_name) {
 		err = -ENOMEM;
-		goto err_iounmap;
+		goto err_memunmap;
 	}
 
 	sysfs_bin_attr_init(&sec->bin_attr);
@@ -233,8 +233,8 @@ err_sysfs_remove:
 	sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
 err_free_raw_name:
 	kfree(sec->raw_name);
-err_iounmap:
-	iounmap(sec->baseaddr);
+err_memunmap:
+	memunmap(sec->baseaddr);
 	return err;
 }
 
@@ -245,7 +245,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
 		kobject_put(sec->kobj);
 		sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
 		kfree(sec->raw_name);
-		iounmap(sec->baseaddr);
+		memunmap(sec->baseaddr);
 	}
 
 	return 0;
@@ -262,7 +262,7 @@ static int vpd_sections_init(phys_addr_t physaddr)
 		return -ENOMEM;
 
 	memcpy_fromio(&header, temp, sizeof(struct vpd_cbmem));
-	iounmap(temp);
+	memunmap(temp);
 
 	if (header.magic != VPD_CBMEM_MAGIC)
 		return -ENODEV;
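
The relabeled error path above is the whole point of this change: these
buffers are created with memremap(), and a mapping must be torn down by
the counterpart of the call that created it. A minimal sketch of the
pairing, with a hypothetical helper and region:

#include <linux/io.h>

/* memremap() pairs with memunmap(); iounmap() only pairs with ioremap() */
static int example_read_table(phys_addr_t paddr, size_t len)
{
	void *base = memremap(paddr, len, MEMREMAP_WB);

	if (!base)
		return -ENOMEM;

	/* ... parse the table at base ... */

	memunmap(base);		/* not iounmap() */
	return 0;
}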

+ 1 - 0
drivers/fmc/Makefile

@@ -6,6 +6,7 @@ fmc-y += fmc-match.o
 fmc-y += fmc-sdb.o
 fmc-y += fru-parse.o
 fmc-y += fmc-dump.o
+fmc-y += fmc-debug.o
 
 obj-$(CONFIG_FMC_FAKEDEV) += fmc-fakedev.o
 obj-$(CONFIG_FMC_TRIVIAL) += fmc-trivial.o

+ 1 - 2
drivers/fmc/fmc-chardev.c

@@ -129,8 +129,7 @@ static int fc_probe(struct fmc_device *fmc)
 
 	struct fc_instance *fc;
 
-	if (fmc->op->validate)
-		index = fmc->op->validate(fmc, &fc_drv);
+	index = fmc_validate(fmc, &fc_drv);
 	if (index < 0)
 		return -EINVAL; /* not our device: invalid */
 

+ 90 - 5
drivers/fmc/fmc-core.c

@@ -13,6 +13,9 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/fmc.h>
+#include <linux/fmc-sdb.h>
+
+#include "fmc-private.h"
 
 static int fmc_check_version(unsigned long version, const char *name)
 {
@@ -118,6 +121,61 @@ static struct bin_attribute fmc_eeprom_attr = {
 	.write = fmc_write_eeprom,
 };
 
+int fmc_irq_request(struct fmc_device *fmc, irq_handler_t h,
+		    char *name, int flags)
+{
+	if (fmc->op->irq_request)
+		return fmc->op->irq_request(fmc, h, name, flags);
+	return -EPERM;
+}
+EXPORT_SYMBOL(fmc_irq_request);
+
+void fmc_irq_free(struct fmc_device *fmc)
+{
+	if (fmc->op->irq_free)
+		fmc->op->irq_free(fmc);
+}
+EXPORT_SYMBOL(fmc_irq_free);
+
+void fmc_irq_ack(struct fmc_device *fmc)
+{
+	if (likely(fmc->op->irq_ack))
+		fmc->op->irq_ack(fmc);
+}
+EXPORT_SYMBOL(fmc_irq_ack);
+
+int fmc_validate(struct fmc_device *fmc, struct fmc_driver *drv)
+{
+	if (fmc->op->validate)
+		return fmc->op->validate(fmc, drv);
+	return -EPERM;
+}
+EXPORT_SYMBOL(fmc_validate);
+
+int fmc_gpio_config(struct fmc_device *fmc, struct fmc_gpio *gpio, int ngpio)
+{
+	if (fmc->op->gpio_config)
+		return fmc->op->gpio_config(fmc, gpio, ngpio);
+	return -EPERM;
+}
+EXPORT_SYMBOL(fmc_gpio_config);
+
+int fmc_read_ee(struct fmc_device *fmc, int pos, void *d, int l)
+{
+	if (fmc->op->read_ee)
+		return fmc->op->read_ee(fmc, pos, d, l);
+	return -EPERM;
+}
+EXPORT_SYMBOL(fmc_read_ee);
+
+int fmc_write_ee(struct fmc_device *fmc, int pos, const void *d, int l)
+{
+	if (fmc->op->write_ee)
+		return fmc->op->write_ee(fmc, pos, d, l);
+	return -EPERM;
+}
+EXPORT_SYMBOL(fmc_write_ee);
+
 /*
  * Functions for client modules follow
  */
@@ -141,7 +199,8 @@ EXPORT_SYMBOL(fmc_driver_unregister);
  * When a device set is registered, all eeproms must be read
  * and all FRUs must be parsed
  */
-int fmc_device_register_n(struct fmc_device **devs, int n)
+int fmc_device_register_n_gw(struct fmc_device **devs, int n,
+			  struct fmc_gateware *gw)
 {
 	struct fmc_device *fmc, **devarray;
 	uint32_t device_id;
@@ -221,6 +280,21 @@ int fmc_device_register_n(struct fmc_device **devs, int n)
 		else
 			dev_set_name(&fmc->dev, "%s-%04x", fmc->mezzanine_name,
 				     device_id);
+
+		if (gw) {
+			/*
+			 * The carrier already knows the bitstream to load
+			 * for this set of FMC mezzanines.
+			 */
+			ret = fmc->op->reprogram_raw(fmc, NULL,
+						     gw->bitstream, gw->len);
+			if (ret) {
+				dev_warn(fmc->hwdev,
+					 "Invalid gateware for FMC mezzanine\n");
+				goto out;
+			}
+		}
+
 		ret = device_add(&fmc->dev);
 		if (ret < 0) {
 			dev_err(fmc->hwdev, "Slot %i: Failed in registering "
@@ -234,18 +308,16 @@ int fmc_device_register_n(struct fmc_device **devs, int n)
 		}
 		/* This device went well, give information to the user */
 		fmc_dump_eeprom(fmc);
-		fmc_dump_sdb(fmc);
+		fmc_debug_init(fmc);
 	}
 	return 0;
 
 out1:
 	device_del(&fmc->dev);
 out:
-	fmc_free_id_info(fmc);
-	put_device(&fmc->dev);
-
 	kfree(devarray);
 	for (i--; i >= 0; i--) {
+		fmc_debug_exit(devs[i]);
 		sysfs_remove_bin_file(&devs[i]->dev.kobj, &fmc_eeprom_attr);
 		device_del(&devs[i]->dev);
 		fmc_free_id_info(devs[i]);
@@ -254,8 +326,20 @@ out:
 	return ret;
 
 }
+EXPORT_SYMBOL(fmc_device_register_n_gw);
+
+int fmc_device_register_n(struct fmc_device **devs, int n)
+{
+	return fmc_device_register_n_gw(devs, n, NULL);
+}
 EXPORT_SYMBOL(fmc_device_register_n);
 
+int fmc_device_register_gw(struct fmc_device *fmc, struct fmc_gateware *gw)
+{
+	return fmc_device_register_n_gw(&fmc, 1, gw);
+}
+EXPORT_SYMBOL(fmc_device_register_gw);
+
 int fmc_device_register(struct fmc_device *fmc)
 {
 	return fmc_device_register_n(&fmc, 1);
@@ -273,6 +357,7 @@ void fmc_device_unregister_n(struct fmc_device **devs, int n)
 	kfree(devs[0]->devarray);
 
 	for (i = 0; i < n; i++) {
+		fmc_debug_exit(devs[i]);
 		sysfs_remove_bin_file(&devs[i]->dev.kobj, &fmc_eeprom_attr);
 		device_del(&devs[i]->dev);
 		fmc_free_id_info(devs[i]);
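
The wrappers above let mezzanine drivers stop testing fmc->op themselves;
each wrapper returns -EPERM when the carrier lacks the operation. The
fmc-chardev.c and fmc-trivial.c hunks elsewhere in this series are the
in-tree users; as a condensed probe/remove sketch with hypothetical
example_* names:

#include <linux/fmc.h>
#include <linux/interrupt.h>

static struct fmc_driver example_drv;	/* hypothetical; fields elided */

static irqreturn_t example_handler(int irq, void *dev_id)
{
	struct fmc_device *fmc = dev_id;

	fmc_irq_ack(fmc);	/* safe even without an irq_ack op */
	return IRQ_HANDLED;
}

static int example_probe(struct fmc_device *fmc)
{
	int index, ret;

	index = fmc_validate(fmc, &example_drv);
	if (index < 0)
		return -EINVAL;	/* not our device */

	ret = fmc_irq_request(fmc, example_handler, "example-fmc",
			      IRQF_SHARED);
	if (ret < 0)
		return ret;

	return 0;
}

static int example_remove(struct fmc_device *fmc)
{
	fmc_irq_free(fmc);
	return 0;
}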

+ 173 - 0
drivers/fmc/fmc-debug.c

@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2015 CERN (www.cern.ch)
+ * Author: Federico Vaga <federico.vaga@cern.ch>
+ *
+ * Released according to the GNU GPL, version 2 or any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/byteorder.h>
+
+#include <linux/fmc.h>
+#include <linux/sdb.h>
+#include <linux/fmc-sdb.h>
+
+#define FMC_DBG_SDB_DUMP "dump_sdb"
+
+static char *__strip_trailing_space(char *buf, char *str, int len)
+{
+	int i = len - 1;
+
+	memcpy(buf, str, len);
+	buf[len] = '\0';
+	while (i >= 0 && buf[i] == ' ')
+		buf[i--] = '\0';
+	return buf;
+}
+
+#define __sdb_string(buf, field) ({			\
+	BUILD_BUG_ON(sizeof(buf) < sizeof(field));	\
+	__strip_trailing_space(buf, (void *)(field), sizeof(field));	\
+		})
+
+/**
+ * We do not check seq_printf() errors because we want to see things in any case
+ */
+static void fmc_sdb_dump_recursive(struct fmc_device *fmc, struct seq_file *s,
+				   const struct sdb_array *arr)
+{
+	unsigned long base = arr->baseaddr;
+	int i, j, n = arr->len, level = arr->level;
+	char tmp[64];
+
+	for (i = 0; i < n; i++) {
+		union  sdb_record *r;
+		struct sdb_product *p;
+		struct sdb_component *c;
+
+		r = &arr->record[i];
+		c = &r->dev.sdb_component;
+		p = &c->product;
+
+		for (j = 0; j < level; j++)
+			seq_printf(s, "   ");
+		switch (r->empty.record_type) {
+		case sdb_type_interconnect:
+			seq_printf(s, "%08llx:%08x %.19s\n",
+				   __be64_to_cpu(p->vendor_id),
+				   __be32_to_cpu(p->device_id),
+				   p->name);
+			break;
+		case sdb_type_device:
+			seq_printf(s, "%08llx:%08x %.19s (%08llx-%08llx)\n",
+				   __be64_to_cpu(p->vendor_id),
+				   __be32_to_cpu(p->device_id),
+				   p->name,
+				   __be64_to_cpu(c->addr_first) + base,
+				   __be64_to_cpu(c->addr_last) + base);
+			break;
+		case sdb_type_bridge:
+			seq_printf(s, "%08llx:%08x %.19s (bridge: %08llx)\n",
+				   __be64_to_cpu(p->vendor_id),
+				   __be32_to_cpu(p->device_id),
+				   p->name,
+				   __be64_to_cpu(c->addr_first) + base);
+			if (IS_ERR(arr->subtree[i])) {
+				seq_printf(s, "SDB: (bridge error %li)\n",
+					 PTR_ERR(arr->subtree[i]));
+				break;
+			}
+			fmc_sdb_dump_recursive(fmc, s, arr->subtree[i]);
+			break;
+		case sdb_type_integration:
+			seq_printf(s, "integration\n");
+			break;
+		case sdb_type_repo_url:
+			seq_printf(s, "Synthesis repository: %s\n",
+					  __sdb_string(tmp, r->repo_url.repo_url));
+			break;
+		case sdb_type_synthesis:
+			seq_printf(s, "Bitstream '%s' ",
+					  __sdb_string(tmp, r->synthesis.syn_name));
+			seq_printf(s, "synthesized %08x by %s ",
+					  __be32_to_cpu(r->synthesis.date),
+					  __sdb_string(tmp, r->synthesis.user_name));
+			seq_printf(s, "(%s version %x), ",
+					  __sdb_string(tmp, r->synthesis.tool_name),
+					  __be32_to_cpu(r->synthesis.tool_version));
+			seq_printf(s, "commit %pm\n",
+					  r->synthesis.commit_id);
+			break;
+		case sdb_type_empty:
+			seq_printf(s, "empty\n");
+			break;
+		default:
+			seq_printf(s, "UNKNOWN TYPE 0x%02x\n",
+				   r->empty.record_type);
+			break;
+		}
+	}
+}
+
+static int fmc_sdb_dump(struct seq_file *s, void *offset)
+{
+	struct fmc_device *fmc = s->private;
+
+	if (!fmc->sdb) {
+		seq_printf(s, "no SDB information\n");
+		return 0;
+	}
+
+	seq_printf(s, "FMC: %s (%s), slot %i, device %s\n", dev_name(fmc->hwdev),
+	fmc->carrier_name, fmc->slot_id, dev_name(&fmc->dev));
+	/* Dump SDB information */
+	fmc_sdb_dump_recursive(fmc, s, fmc->sdb);
+
+	return 0;
+}
+
+
+static int fmc_sdb_dump_open(struct inode *inode, struct file *file)
+{
+	struct fmc_device *fmc = inode->i_private;
+
+	return single_open(file, fmc_sdb_dump, fmc);
+}
+
+
+const struct file_operations fmc_dbgfs_sdb_dump = {
+	.owner = THIS_MODULE,
+	.open  = fmc_sdb_dump_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+int fmc_debug_init(struct fmc_device *fmc)
+{
+	fmc->dbg_dir = debugfs_create_dir(dev_name(&fmc->dev), NULL);
+	if (IS_ERR_OR_NULL(fmc->dbg_dir)) {
+		pr_err("FMC: Cannot create debugfs\n");
+		return PTR_ERR(fmc->dbg_dir);
+	}
+
+	fmc->dbg_sdb_dump = debugfs_create_file(FMC_DBG_SDB_DUMP, 0444,
+						fmc->dbg_dir, fmc,
+						&fmc_dbgfs_sdb_dump);
+	if (IS_ERR_OR_NULL(fmc->dbg_sdb_dump))
+		pr_err("FMC: Cannot create debugfs file %s\n",
+		       FMC_DBG_SDB_DUMP);
+
+	return 0;
+}
+
+void fmc_debug_exit(struct fmc_device *fmc)
+{
+	if (fmc->dbg_dir)
+		debugfs_remove_recursive(fmc->dbg_dir);
+}
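
The new file above is an instance of the standard debugfs plus seq_file
pattern; stripped to its skeleton (with hypothetical example_* names) it
looks like this:

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *s, void *offset)
{
	/* s->private is the pointer handed to single_open() below */
	seq_printf(s, "state: %d\n", 42);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* inode->i_private is the data passed to debugfs_create_file() */
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};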

+ 0 - 41
drivers/fmc/fmc-dump.c

@@ -15,8 +15,6 @@
 
 static int fmc_must_dump_eeprom;
 module_param_named(dump_eeprom, fmc_must_dump_eeprom, int, 0644);
-static int fmc_must_dump_sdb;
-module_param_named(dump_sdb, fmc_must_dump_sdb, int, 0644);
 
 #define LINELEN 16
 
@@ -59,42 +57,3 @@ void fmc_dump_eeprom(const struct fmc_device *fmc)
 	for (i = 0; i < fmc->eeprom_len; i += LINELEN, line += LINELEN)
 		prev = dump_line(i, line, prev);
 }
-
-void fmc_dump_sdb(const struct fmc_device *fmc)
-{
-	const uint8_t *line, *prev;
-	int i, len;
-
-	if (!fmc->sdb)
-		return;
-	if (!fmc_must_dump_sdb)
-		return;
-
-	/* If the argument is not-zero, do simple dump (== show) */
-	if (fmc_must_dump_sdb > 0)
-		fmc_show_sdb_tree(fmc);
-
-	if (fmc_must_dump_sdb == 1)
-		return;
-
-	/* If bigger than 1, dump it seriously, to help debugging */
-
-	/*
-	 * Here we should really use libsdbfs (which is designed to
-	 * work in kernel space as well) , but it doesn't support
-	 * directories yet, and it requires better intergration (it
-	 * should be used instead of fmc-specific code).
-	 *
-	 * So, lazily, just dump the top-level array
-	 */
-	pr_info("FMC: %s (%s), slot %i, device %s\n", dev_name(fmc->hwdev),
-		fmc->carrier_name, fmc->slot_id, dev_name(&fmc->dev));
-	pr_info("FMC: poor dump of sdb first level:\n");
-
-	len = fmc->sdb->len * sizeof(union sdb_record);
-	line = (void *)fmc->sdb->record;
-	prev = NULL;
-	for (i = 0; i < len; i += LINELEN, line += LINELEN)
-		prev = dump_line(i, line, prev);
-	return;
-}

+ 1 - 1
drivers/fmc/fmc-match.c

@@ -63,7 +63,7 @@ int fmc_fill_id_info(struct fmc_device *fmc)
 		if (!fmc->eeprom)
 			return -ENOMEM;
 		allocated = 1;
-		ret = fmc->op->read_ee(fmc, 0, fmc->eeprom, fmc->eeprom_len);
+		ret = fmc_read_ee(fmc, 0, fmc->eeprom, fmc->eeprom_len);
 		if (ret < 0)
 			goto out;
 	}

+ 9 - 0
drivers/fmc/fmc-private.h

@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2015 CERN (www.cern.ch)
+ * Author: Federico Vaga <federico.vaga@cern.ch>
+ *
+ * Released according to the GNU GPL, version 2 or any later version.
+ */
+
+extern int fmc_debug_init(struct fmc_device *fmc);
+extern void fmc_debug_exit(struct fmc_device *fmc);

+ 25 - 94
drivers/fmc/fmc-sdb.c

@@ -127,12 +127,12 @@ int fmc_free_sdb_tree(struct fmc_device *fmc)
 EXPORT_SYMBOL(fmc_free_sdb_tree);
 
 /* This helper calls reprogram and initializes sdb as well */
-int fmc_reprogram(struct fmc_device *fmc, struct fmc_driver *d, char *gw,
-			 int sdb_entry)
+int fmc_reprogram_raw(struct fmc_device *fmc, struct fmc_driver *d,
+		      void *gw, unsigned long len, int sdb_entry)
 {
 	int ret;
 
-	ret = fmc->op->reprogram(fmc, d, gw);
+	ret = fmc->op->reprogram_raw(fmc, d, gw, len);
 	if (ret < 0)
 		return ret;
 	if (sdb_entry < 0)
@@ -145,108 +145,39 @@ int fmc_reprogram(struct fmc_device *fmc, struct fmc_driver *d, char *gw,
 			sdb_entry);
 		return -ENODEV;
 	}
-	fmc_dump_sdb(fmc);
-	return 0;
-}
-EXPORT_SYMBOL(fmc_reprogram);
-
-static char *__strip_trailing_space(char *buf, char *str, int len)
-{
-	int i = len - 1;
 
-	memcpy(buf, str, len);
-	while(i >= 0 && buf[i] == ' ')
-		buf[i--] = '\0';
-	return buf;
+	return 0;
 }
+EXPORT_SYMBOL(fmc_reprogram_raw);
 
-#define __sdb_string(buf, field) ({			\
-	BUILD_BUG_ON(sizeof(buf) < sizeof(field));	\
-	__strip_trailing_space(buf, (void *)(field), sizeof(field));	\
-		})
-
-static void __fmc_show_sdb_tree(const struct fmc_device *fmc,
-				const struct sdb_array *arr)
+/* This helper calls reprogram and initializes sdb as well */
+int fmc_reprogram(struct fmc_device *fmc, struct fmc_driver *d, char *gw,
+			 int sdb_entry)
 {
-	unsigned long base = arr->baseaddr;
-	int i, j, n = arr->len, level = arr->level;
-	char buf[64];
-
-	for (i = 0; i < n; i++) {
-		union  sdb_record *r;
-		struct sdb_product *p;
-		struct sdb_component *c;
-		r = &arr->record[i];
-		c = &r->dev.sdb_component;
-		p = &c->product;
+	int ret;
 
-		dev_info(&fmc->dev, "SDB: ");
+	ret = fmc->op->reprogram(fmc, d, gw);
+	if (ret < 0)
+		return ret;
+	if (sdb_entry < 0)
+		return ret;
 
-		for (j = 0; j < level; j++)
-			printk(KERN_CONT "   ");
-		switch (r->empty.record_type) {
-		case sdb_type_interconnect:
-			printk(KERN_CONT "%08llx:%08x %.19s\n",
-			       __be64_to_cpu(p->vendor_id),
-			       __be32_to_cpu(p->device_id),
-			       p->name);
-			break;
-		case sdb_type_device:
-			printk(KERN_CONT "%08llx:%08x %.19s (%08llx-%08llx)\n",
-			       __be64_to_cpu(p->vendor_id),
-			       __be32_to_cpu(p->device_id),
-			       p->name,
-			       __be64_to_cpu(c->addr_first) + base,
-			       __be64_to_cpu(c->addr_last) + base);
-			break;
-		case sdb_type_bridge:
-			printk(KERN_CONT "%08llx:%08x %.19s (bridge: %08llx)\n",
-			       __be64_to_cpu(p->vendor_id),
-			       __be32_to_cpu(p->device_id),
-			       p->name,
-			       __be64_to_cpu(c->addr_first) + base);
-			if (IS_ERR(arr->subtree[i])) {
-				dev_info(&fmc->dev, "SDB: (bridge error %li)\n",
-					 PTR_ERR(arr->subtree[i]));
-				break;
-			}
-			__fmc_show_sdb_tree(fmc, arr->subtree[i]);
-			break;
-		case sdb_type_integration:
-			printk(KERN_CONT "integration\n");
-			break;
-		case sdb_type_repo_url:
-			printk(KERN_CONT "Synthesis repository: %s\n",
-			       __sdb_string(buf, r->repo_url.repo_url));
-			break;
-		case sdb_type_synthesis:
-			printk(KERN_CONT "Bitstream '%s' ",
-			       __sdb_string(buf, r->synthesis.syn_name));
-			printk(KERN_CONT "synthesized %08x by %s ",
-			       __be32_to_cpu(r->synthesis.date),
-			       __sdb_string(buf, r->synthesis.user_name));
-			printk(KERN_CONT "(%s version %x), ",
-			       __sdb_string(buf, r->synthesis.tool_name),
-			       __be32_to_cpu(r->synthesis.tool_version));
-			printk(KERN_CONT "commit %pm\n",
-			       r->synthesis.commit_id);
-			break;
-		case sdb_type_empty:
-			printk(KERN_CONT "empty\n");
-			break;
-		default:
-			printk(KERN_CONT "UNKNOWN TYPE 0x%02x\n",
-			       r->empty.record_type);
-			break;
-		}
+	/* We are required to find SDB at a given offset */
+	ret = fmc_scan_sdb_tree(fmc, sdb_entry);
+	if (ret < 0) {
+		dev_err(&fmc->dev, "Can't find SDB at address 0x%x\n",
			sdb_entry);
+		return -ENODEV;
 	}
+
+	return 0;
 }
+EXPORT_SYMBOL(fmc_reprogram);
 
 void fmc_show_sdb_tree(const struct fmc_device *fmc)
 {
-	if (!fmc->sdb)
-		return;
-	__fmc_show_sdb_tree(fmc, fmc->sdb);
+	pr_err("%s: not supported anymore, use debugfs to dump SDB\n",
+		__func__);
 }
 EXPORT_SYMBOL(fmc_show_sdb_tree);
 

+ 8 - 12
drivers/fmc/fmc-trivial.c

@@ -24,7 +24,7 @@ static irqreturn_t t_handler(int irq, void *dev_id)
 {
 	struct fmc_device *fmc = dev_id;
 
-	fmc->op->irq_ack(fmc);
+	fmc_irq_ack(fmc);
 	dev_info(&fmc->dev, "received irq %i\n", irq);
 	return IRQ_HANDLED;
 }
@@ -46,25 +46,21 @@ static int t_probe(struct fmc_device *fmc)
 	int ret;
 	int index = 0;
 
-	if (fmc->op->validate)
-		index = fmc->op->validate(fmc, &t_drv);
+	index = fmc_validate(fmc, &t_drv);
 	if (index < 0)
 		return -EINVAL; /* not our device: invalid */
 
-	ret = fmc->op->irq_request(fmc, t_handler, "fmc-trivial", IRQF_SHARED);
+	ret = fmc_irq_request(fmc, t_handler, "fmc-trivial", IRQF_SHARED);
 	if (ret < 0)
 		return ret;
 	/* ignore error code of call below, we really don't care */
-	fmc->op->gpio_config(fmc, t_gpio, ARRAY_SIZE(t_gpio));
+	fmc_gpio_config(fmc, t_gpio, ARRAY_SIZE(t_gpio));
 
-	/* Reprogram, if asked to. ESRCH == no filename specified */
-	ret = -ESRCH;
-	if (fmc->op->reprogram)
-		ret = fmc->op->reprogram(fmc, &t_drv, "");
-	if (ret == -ESRCH)
+	ret = fmc_reprogram(fmc, &t_drv, "", 0);
+	if (ret == -EPERM) /* programming not supported */
 		ret = 0;
 	if (ret < 0)
-		fmc->op->irq_free(fmc);
+		fmc_irq_free(fmc);
 
 	/* FIXME: reprogram LM32 too */
 	return ret;
@@ -72,7 +68,7 @@ static int t_probe(struct fmc_device *fmc)
 
 static int t_remove(struct fmc_device *fmc)
 {
-	fmc->op->irq_free(fmc);
+	fmc_irq_free(fmc);
 	return 0;
 }
 

+ 4 - 4
drivers/fmc/fmc-write-eeprom.c

@@ -50,7 +50,7 @@ static int fwe_run_tlv(struct fmc_device *fmc, const struct firmware *fw,
 		if (write) {
 			dev_info(&fmc->dev, "write %i bytes at 0x%04x\n",
 				 thislen, thisaddr);
-			err = fmc->op->write_ee(fmc, thisaddr, p + 5, thislen);
+			err = fmc_write_ee(fmc, thisaddr, p + 5, thislen);
 		}
 		if (err < 0) {
 			dev_err(&fmc->dev, "write failure @0x%04x\n",
@@ -70,7 +70,7 @@ static int fwe_run_bin(struct fmc_device *fmc, const struct firmware *fw)
 	int ret;
 
 	dev_info(&fmc->dev, "programming %zi bytes\n", fw->size);
-	ret = fmc->op->write_ee(fmc, 0, (void *)fw->data, fw->size);
+	ret = fmc_write_ee(fmc, 0, (void *)fw->data, fw->size);
 	if (ret < 0) {
 		dev_info(&fmc->dev, "write_eeprom: error %i\n", ret);
 		return ret;
@@ -115,8 +115,8 @@ static int fwe_probe(struct fmc_device *fmc)
 			KBUILD_MODNAME);
 		return -ENODEV;
 	}
-	if (fmc->op->validate)
-		index = fmc->op->validate(fmc, &fwe_drv);
+
+	index = fmc_validate(fmc, &fwe_drv);
 	if (index < 0) {
 		pr_err("%s: refusing device \"%s\"\n", KBUILD_MODNAME,
 		       dev_name(dev));

+ 1 - 2
drivers/fmc/fru-parse.c

@@ -31,12 +31,11 @@ static char *__fru_alloc_get_tl(struct fru_common_header *header, int nr)
 {
 	struct fru_type_length *tl;
 	char *res;
-	int len;
 
 	tl = __fru_get_board_tl(header, nr);
 	if (!tl)
 		return NULL;
-	len = fru_strlen(tl);
+
 	res = fru_alloc(fru_strlen(tl) + 1);
 	if (!res)
 		return NULL;

+ 15 - 5
drivers/fpga/Kconfig

@@ -2,9 +2,7 @@
 # FPGA framework configuration
 #
 
-menu "FPGA Configuration Support"
-
-config FPGA
+menuconfig FPGA
 	tristate "FPGA Configuration Framework"
 	help
 	  Say Y here if you want support for configuring FPGAs from the
@@ -26,6 +24,20 @@ config FPGA_MGR_ICE40_SPI
 	help
 	  FPGA manager driver support for Lattice iCE40 FPGAs over SPI.
 
+config FPGA_MGR_ALTERA_CVP
+	tristate "Altera Arria-V/Cyclone-V/Stratix-V CvP FPGA Manager"
+	depends on PCI
+	help
+	  FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V
+	  and Arria 10 Altera FPGAs using the CvP interface over PCIe.
+
+config FPGA_MGR_ALTERA_PS_SPI
+	tristate "Altera FPGA Passive Serial over SPI"
+	depends on SPI
+	help
+	  FPGA manager driver support for Altera Arria/Cyclone/Stratix
+	  using the passive serial interface over SPI.
+
 config FPGA_MGR_SOCFPGA
 	tristate "Altera SOCFPGA FPGA Manager"
 	depends on ARCH_SOCFPGA || COMPILE_TEST
@@ -106,5 +118,3 @@ config XILINX_PR_DECOUPLER
 	  being reprogrammed during partial reconfig.
 
 endif # FPGA
-endmenu

+ 2 - 0
drivers/fpga/Makefile

@@ -6,6 +6,8 @@
 obj-$(CONFIG_FPGA)			+= fpga-mgr.o
 
 # FPGA Manager Drivers
+obj-$(CONFIG_FPGA_MGR_ALTERA_CVP)	+= altera-cvp.o
+obj-$(CONFIG_FPGA_MGR_ALTERA_PS_SPI)	+= altera-ps-spi.o
 obj-$(CONFIG_FPGA_MGR_ICE40_SPI)	+= ice40-spi.o
 obj-$(CONFIG_FPGA_MGR_SOCFPGA)		+= socfpga.o
 obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10)	+= socfpga-a10.o

+ 500 - 0
drivers/fpga/altera-cvp.c

@@ -0,0 +1,500 @@
+/*
+ * FPGA Manager Driver for Altera Arria/Cyclone/Stratix CvP
+ *
+ * Copyright (C) 2017 DENX Software Engineering
+ *
+ * Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Manage Altera FPGA firmware using PCIe CvP.
+ * Firmware must be in binary "rbf" format.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+
+#define CVP_BAR		0	/* BAR used for data transfer in memory mode */
+#define CVP_DUMMY_WR	244	/* dummy writes to clear CvP state machine */
+#define TIMEOUT_US	2000	/* CVP STATUS timeout for USERMODE polling */
+
+/* Vendor Specific Extended Capability Registers */
+#define VSE_PCIE_EXT_CAP_ID		0x200
+#define VSE_PCIE_EXT_CAP_ID_VAL		0x000b	/* 16bit */
+
+#define VSE_CVP_STATUS			0x21c	/* 32bit */
+#define VSE_CVP_STATUS_CFG_RDY		BIT(18)	/* CVP_CONFIG_READY */
+#define VSE_CVP_STATUS_CFG_ERR		BIT(19)	/* CVP_CONFIG_ERROR */
+#define VSE_CVP_STATUS_CVP_EN		BIT(20)	/* ctrl block is enabling CVP */
+#define VSE_CVP_STATUS_USERMODE		BIT(21)	/* USERMODE */
+#define VSE_CVP_STATUS_CFG_DONE		BIT(23)	/* CVP_CONFIG_DONE */
+#define VSE_CVP_STATUS_PLD_CLK_IN_USE	BIT(24)	/* PLD_CLK_IN_USE */
+
+#define VSE_CVP_MODE_CTRL		0x220	/* 32bit */
+#define VSE_CVP_MODE_CTRL_CVP_MODE	BIT(0)	/* CVP (1) or normal mode (0) */
+#define VSE_CVP_MODE_CTRL_HIP_CLK_SEL	BIT(1) /* PMA (1) or fabric clock (0) */
+#define VSE_CVP_MODE_CTRL_NUMCLKS_OFF	8	/* NUMCLKS bits offset */
+#define VSE_CVP_MODE_CTRL_NUMCLKS_MASK	GENMASK(15, 8)
+
+#define VSE_CVP_DATA			0x228	/* 32bit */
+#define VSE_CVP_PROG_CTRL		0x22c	/* 32bit */
+#define VSE_CVP_PROG_CTRL_CONFIG	BIT(0)
+#define VSE_CVP_PROG_CTRL_START_XFER	BIT(1)
+
+#define VSE_UNCOR_ERR_STATUS		0x234	/* 32bit */
+#define VSE_UNCOR_ERR_CVP_CFG_ERR	BIT(5)	/* CVP_CONFIG_ERROR_LATCHED */
+
+#define DRV_NAME		"altera-cvp"
+#define ALTERA_CVP_MGR_NAME	"Altera CvP FPGA Manager"
+
+/* Optional CvP config error status check for debugging */
+static bool altera_cvp_chkcfg;
+
+struct altera_cvp_conf {
+	struct fpga_manager	*mgr;
+	struct pci_dev		*pci_dev;
+	void __iomem		*map;
+	void			(*write_data)(struct altera_cvp_conf *, u32);
+	char			mgr_name[64];
+	u8			numclks;
+};
+
+static enum fpga_mgr_states altera_cvp_state(struct fpga_manager *mgr)
+{
+	struct altera_cvp_conf *conf = mgr->priv;
+	u32 status;
+
+	pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &status);
+
+	if (status & VSE_CVP_STATUS_CFG_DONE)
+		return FPGA_MGR_STATE_OPERATING;
+
+	if (status & VSE_CVP_STATUS_CVP_EN)
+		return FPGA_MGR_STATE_POWER_UP;
+
+	return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static void altera_cvp_write_data_iomem(struct altera_cvp_conf *conf, u32 val)
+{
+	writel(val, conf->map);
+}
+
+static void altera_cvp_write_data_config(struct altera_cvp_conf *conf, u32 val)
+{
+	pci_write_config_dword(conf->pci_dev, VSE_CVP_DATA, val);
+}
+
+/* switches between CvP clock and internal clock */
+static void altera_cvp_dummy_write(struct altera_cvp_conf *conf)
+{
+	unsigned int i;
+	u32 val;
+
+	/* set 1 CVP clock cycle for every CVP Data Register Write */
+	pci_read_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, &val);
+	val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
+	val |= 1 << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
+	pci_write_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, val);
+
+	for (i = 0; i < CVP_DUMMY_WR; i++)
+		conf->write_data(conf, 0); /* dummy data, could be any value */
+}
+
+static int altera_cvp_wait_status(struct altera_cvp_conf *conf, u32 status_mask,
+				  u32 status_val, int timeout_us)
+{
+	unsigned int retries;
+	u32 val;
+
+	retries = timeout_us / 10;
+	if (timeout_us % 10)
+		retries++;
+
+	do {
+		pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val);
+		if ((val & status_mask) == status_val)
+			return 0;
+
+		/* use small usleep value to re-check and break early */
+		usleep_range(10, 11);
+	} while (--retries);
+
+	return -ETIMEDOUT;
+}
+
+static int altera_cvp_teardown(struct fpga_manager *mgr,
+			       struct fpga_image_info *info)
+{
+	struct altera_cvp_conf *conf = mgr->priv;
+	struct pci_dev *pdev = conf->pci_dev;
+	int ret;
+	u32 val;
+
+	/* STEP 12 - reset START_XFER bit */
+	pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+	val &= ~VSE_CVP_PROG_CTRL_START_XFER;
+	pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+
+	/* STEP 13 - reset CVP_CONFIG bit */
+	val &= ~VSE_CVP_PROG_CTRL_CONFIG;
+	pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+
+	/*
+	 * STEP 14
+	 * - set CVP_NUMCLKS to 1 and then issue CVP_DUMMY_WR dummy
+	 *   writes to the HIP
+	 */
+	altera_cvp_dummy_write(conf); /* from CVP clock to internal clock */
+
+	/* STEP 15 - poll CVP_CONFIG_READY bit for 0 with 10us timeout */
+	ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0, 10);
+	if (ret)
+		dev_err(&mgr->dev, "CFG_RDY == 0 timeout\n");
+
+	return ret;
+}
+
+static int altera_cvp_write_init(struct fpga_manager *mgr,
+				 struct fpga_image_info *info,
+				 const char *buf, size_t count)
+{
+	struct altera_cvp_conf *conf = mgr->priv;
+	struct pci_dev *pdev = conf->pci_dev;
+	u32 iflags, val;
+	int ret;
+
+	iflags = info ? info->flags : 0;
+
+	if (iflags & FPGA_MGR_PARTIAL_RECONFIG) {
+		dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
+		return -EINVAL;
+	}
+
+	/* Determine allowed clock to data ratio */
+	if (iflags & FPGA_MGR_COMPRESSED_BITSTREAM)
+		conf->numclks = 8; /* ratio for all compressed images */
+	else if (iflags & FPGA_MGR_ENCRYPTED_BITSTREAM)
+		conf->numclks = 4; /* for uncompressed and encrypted images */
+	else
+		conf->numclks = 1; /* for uncompressed and unencrypted images */
+
+	/* STEP 1 - read CVP status and check CVP_EN flag */
+	pci_read_config_dword(pdev, VSE_CVP_STATUS, &val);
+	if (!(val & VSE_CVP_STATUS_CVP_EN)) {
+		dev_err(&mgr->dev, "CVP mode off: 0x%04x\n", val);
+		return -ENODEV;
+	}
+
+	if (val & VSE_CVP_STATUS_CFG_RDY) {
+		dev_warn(&mgr->dev, "CvP already started, teardown first\n");
+		ret = altera_cvp_teardown(mgr, info);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * STEP 2
+	 * - set HIP_CLK_SEL and CVP_MODE (must be set in the order mentioned)
+	 */
+	/* switch from fabric to PMA clock */
+	pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+	val |= VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
+	pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+
+	/* set CVP mode */
+	pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+	val |= VSE_CVP_MODE_CTRL_CVP_MODE;
+	pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+
+	/*
+	 * STEP 3
+	 * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
+	 */
+	altera_cvp_dummy_write(conf);
+
+	/* STEP 4 - set CVP_CONFIG bit */
+	pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+	/* request control block to begin transfer using CVP */
+	val |= VSE_CVP_PROG_CTRL_CONFIG;
+	pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+
+	/* STEP 5 - poll CVP_CONFIG READY for 1 with 10us timeout */
+	ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY,
+				     VSE_CVP_STATUS_CFG_RDY, 10);
+	if (ret) {
+		dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n");
+		return ret;
+	}
+
+	/*
+	 * STEP 6
+	 * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
+	 */
+	altera_cvp_dummy_write(conf);
+
+	/* STEP 7 - set START_XFER */
+	pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+	val |= VSE_CVP_PROG_CTRL_START_XFER;
+	pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+
+	/* STEP 8 - start transfer (set CVP_NUMCLKS for bitstream) */
+	pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+	val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
+	val |= conf->numclks << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
+	pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+
+	return 0;
+}
+
+static inline int altera_cvp_chk_error(struct fpga_manager *mgr, size_t bytes)
+{
+	struct altera_cvp_conf *conf = mgr->priv;
+	u32 val;
+
+	/* STEP 10 (optional) - check CVP_CONFIG_ERROR flag */
+	pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val);
+	if (val & VSE_CVP_STATUS_CFG_ERR) {
+		dev_err(&mgr->dev, "CVP_CONFIG_ERROR after %zu bytes!\n",
+			bytes);
+		return -EPROTO;
+	}
+	return 0;
+}
+
+static int altera_cvp_write(struct fpga_manager *mgr, const char *buf,
+			    size_t count)
+{
+	struct altera_cvp_conf *conf = mgr->priv;
+	const u32 *data;
+	size_t done, remaining;
+	int status = 0;
+	u32 mask;
+
+	/* STEP 9 - write 32-bit data from RBF file to CVP data register */
+	data = (u32 *)buf;
+	remaining = count;
+	done = 0;
+
+	while (remaining >= 4) {
+		conf->write_data(conf, *data++);
+		done += 4;
+		remaining -= 4;
+
+		/*
+		 * STEP 10 (optional) and STEP 11
+		 * - check error flag
+		 * - loop until data transfer completed
+		 * Config images can be huge (more than 40 MiB), so
+		 * only check after a new 4k data block has been written.
+		 * This reduces the number of checks and speeds up the
+		 * configuration process.
+		 */
+		if (altera_cvp_chkcfg && !(done % SZ_4K)) {
+			status = altera_cvp_chk_error(mgr, done);
+			if (status < 0)
+				return status;
+		}
+	}
+
+	/* write up to 3 trailing bytes, if any */
+	mask = BIT(remaining * 8) - 1;
+	if (mask)
+		conf->write_data(conf, *data & mask);
+
+	if (altera_cvp_chkcfg)
+		status = altera_cvp_chk_error(mgr, count);
+
+	return status;
+}
+
+static int altera_cvp_write_complete(struct fpga_manager *mgr,
+				     struct fpga_image_info *info)
+{
+	struct altera_cvp_conf *conf = mgr->priv;
+	struct pci_dev *pdev = conf->pci_dev;
+	int ret;
+	u32 mask;
+	u32 val;
+
+	ret = altera_cvp_teardown(mgr, info);
+	if (ret)
+		return ret;
+
+	/* STEP 16 - check CVP_CONFIG_ERROR_LATCHED bit */
+	pci_read_config_dword(pdev, VSE_UNCOR_ERR_STATUS, &val);
+	if (val & VSE_UNCOR_ERR_CVP_CFG_ERR) {
+		dev_err(&mgr->dev, "detected CVP_CONFIG_ERROR_LATCHED!\n");
+		return -EPROTO;
+	}
+
+	/* STEP 17 - reset CVP_MODE and HIP_CLK_SEL bit */
+	pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+	val &= ~VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
+	val &= ~VSE_CVP_MODE_CTRL_CVP_MODE;
+	pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+
+	/* STEP 18 - poll PLD_CLK_IN_USE and USER_MODE bits */
+	mask = VSE_CVP_STATUS_PLD_CLK_IN_USE | VSE_CVP_STATUS_USERMODE;
+	ret = altera_cvp_wait_status(conf, mask, mask, TIMEOUT_US);
+	if (ret)
+		dev_err(&mgr->dev, "PLD_CLK_IN_USE|USERMODE timeout\n");
+
+	return ret;
+}
+
+static const struct fpga_manager_ops altera_cvp_ops = {
+	.state		= altera_cvp_state,
+	.write_init	= altera_cvp_write_init,
+	.write		= altera_cvp_write,
+	.write_complete	= altera_cvp_write_complete,
+};
+
+static ssize_t show_chkcfg(struct device_driver *dev, char *buf)
+{
+	return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg);
+}
+
+static ssize_t store_chkcfg(struct device_driver *drv, const char *buf,
+			    size_t count)
+{
+	int ret;
+
+	ret = kstrtobool(buf, &altera_cvp_chkcfg);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static DRIVER_ATTR(chkcfg, 0600, show_chkcfg, store_chkcfg);
+
+static int altera_cvp_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *dev_id);
+static void altera_cvp_remove(struct pci_dev *pdev);
+
+#define PCI_VENDOR_ID_ALTERA	0x1172
+
+static struct pci_device_id altera_cvp_id_tbl[] = {
+	{ PCI_VDEVICE(ALTERA, PCI_ANY_ID) },
+	{ }
+};
+MODULE_DEVICE_TABLE(pci, altera_cvp_id_tbl);
+
+static struct pci_driver altera_cvp_driver = {
+	.name   = DRV_NAME,
+	.id_table = altera_cvp_id_tbl,
+	.probe  = altera_cvp_probe,
+	.remove = altera_cvp_remove,
+};
+
+static int altera_cvp_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *dev_id)
+{
+	struct altera_cvp_conf *conf;
+	u16 cmd, val;
+	int ret;
+
+	/*
+	 * First check if this is the expected FPGA device. PCI config
+	 * space access works without enabling the PCI device, memory
+	 * space access is enabled further down.
+	 */
+	pci_read_config_word(pdev, VSE_PCIE_EXT_CAP_ID, &val);
+	if (val != VSE_PCIE_EXT_CAP_ID_VAL) {
+		dev_err(&pdev->dev, "Wrong EXT_CAP_ID value 0x%x\n", val);
+		return -ENODEV;
+	}
+
+	conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL);
+	if (!conf)
+		return -ENOMEM;
+
+	/*
+	 * Enable memory BAR access. We cannot use pci_enable_device() here
+	 * because it will make the driver unusable with FPGA devices that
+	 * have additional big IOMEM resources (e.g. 4GiB BARs) on 32-bit
+	 * platform. Such BARs will not have an assigned address range and
+	 * pci_enable_device() will fail, complaining about not claimed BAR,
+	 * even if the concerned BAR is not needed for FPGA configuration
+	 * at all. Thus, enable the device via PCI config space command.
+	 */
+	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+	if (!(cmd & PCI_COMMAND_MEMORY)) {
+		cmd |= PCI_COMMAND_MEMORY;
+		pci_write_config_word(pdev, PCI_COMMAND, cmd);
+	}
+
+	ret = pci_request_region(pdev, CVP_BAR, "CVP");
+	if (ret) {
+		dev_err(&pdev->dev, "Requesting CVP BAR region failed\n");
+		goto err_disable;
+	}
+
+	conf->pci_dev = pdev;
+	conf->write_data = altera_cvp_write_data_iomem;
+
+	conf->map = pci_iomap(pdev, CVP_BAR, 0);
+	if (!conf->map) {
+		dev_warn(&pdev->dev, "Mapping CVP BAR failed\n");
+		conf->write_data = altera_cvp_write_data_config;
+	}
+
+	snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s @%s",
+		 ALTERA_CVP_MGR_NAME, pci_name(pdev));
+
+	ret = fpga_mgr_register(&pdev->dev, conf->mgr_name,
+				&altera_cvp_ops, conf);
+	if (ret)
+		goto err_unmap;
+
+	ret = driver_create_file(&altera_cvp_driver.driver,
+				 &driver_attr_chkcfg);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't create sysfs chkcfg file\n");
+		fpga_mgr_unregister(&pdev->dev);
+		goto err_unmap;
+	}
+
+	return 0;
+
+err_unmap:
+	pci_iounmap(pdev, conf->map);
+	pci_release_region(pdev, CVP_BAR);
+err_disable:
+	cmd &= ~PCI_COMMAND_MEMORY;
+	pci_write_config_word(pdev, PCI_COMMAND, cmd);
+	return ret;
+}
+
+static void altera_cvp_remove(struct pci_dev *pdev)
+{
+	struct fpga_manager *mgr = pci_get_drvdata(pdev);
+	struct altera_cvp_conf *conf = mgr->priv;
+	u16 cmd;
+
+	driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
+	fpga_mgr_unregister(&pdev->dev);
+	pci_iounmap(pdev, conf->map);
+	pci_release_region(pdev, CVP_BAR);
+	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+	cmd &= ~PCI_COMMAND_MEMORY;
+	pci_write_config_word(pdev, PCI_COMMAND, cmd);
+}
+
+module_pci_driver(altera_cvp_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
+MODULE_DESCRIPTION("Module to load Altera FPGA over CvP");

+ 8 - 4
drivers/fpga/altera-hps2fpga.c

@@ -66,7 +66,7 @@ static int alt_hps2fpga_enable_show(struct fpga_bridge *bridge)
 
 /* The L3 REMAP register is write only, so keep a cached value. */
 static unsigned int l3_remap_shadow;
-static spinlock_t l3_remap_lock;
+static DEFINE_SPINLOCK(l3_remap_lock);
 
 static int _alt_hps2fpga_enable_set(struct altera_hps2fpga_data *priv,
 				    bool enable)
@@ -143,9 +143,15 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
 	int ret;
 
 	of_id = of_match_device(altera_fpga_of_match, dev);
+	if (!of_id) {
+		dev_err(dev, "failed to match device\n");
+		return -ENODEV;
+	}
+
 	priv = (struct altera_hps2fpga_data *)of_id->data;
 
-	priv->bridge_reset = of_reset_control_get_by_index(dev->of_node, 0);
+	priv->bridge_reset = of_reset_control_get_exclusive_by_index(dev->of_node,
+								     0);
 	if (IS_ERR(priv->bridge_reset)) {
 		dev_err(dev, "Could not get %s reset control\n", priv->name);
 		return PTR_ERR(priv->bridge_reset);
@@ -171,8 +177,6 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
 		return -EBUSY;
 	}
 
-	spin_lock_init(&l3_remap_lock);
-
 	if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
 		if (enable > 1) {
 			dev_warn(dev, "invalid bridge-enable %u > 1\n", enable);
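
The DEFINE_SPINLOCK() conversion above closes an initialization gap: the old code declared the lock statically but only ran spin_lock_init() inside probe, so any earlier path taking the lock would have used an uninitialized lock. A minimal sketch of the idiom, with illustrative names:

#include <linux/spinlock.h>

/* Initialized at compile time; usable before any probe() runs. */
static DEFINE_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... update the shared, write-only register shadow ... */
	spin_unlock_irqrestore(&example_lock, flags);
}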

+ 308 - 0
drivers/fpga/altera-ps-spi.c

@@ -0,0 +1,308 @@
+/*
+ * Altera Passive Serial SPI Driver
+ *
+ *  Copyright (c) 2017 United Western Technologies, Corporation
+ *
+ *  Joshua Clayton <stillcompiling@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Manage Altera FPGA firmware that is loaded over SPI using the passive
+ * serial configuration method.
+ * Firmware must be in binary "rbf" format.
+ * Works on Arria 10, Cyclone V and Stratix V. Should work on Cyclone series.
+ * May work on other Altera FPGAs.
+ */
+
+#include <linux/bitrev.h>
+#include <linux/delay.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+#include <linux/sizes.h>
+
+enum altera_ps_devtype {
+	CYCLONE5,
+	ARRIA10,
+};
+
+struct altera_ps_data {
+	enum altera_ps_devtype devtype;
+	int status_wait_min_us;
+	int status_wait_max_us;
+	int t_cfg_us;
+	int t_st2ck_us;
+};
+
+struct altera_ps_conf {
+	struct gpio_desc *config;
+	struct gpio_desc *confd;
+	struct gpio_desc *status;
+	struct spi_device *spi;
+	const struct altera_ps_data *data;
+	u32 info_flags;
+	char mgr_name[64];
+};
+
+/*          |   Arria 10  |   Cyclone5  |   Stratix5  |
+ * t_CF2ST0 |     [; 600] |     [; 600] |     [; 600] |ns
+ * t_CFG    |        [2;] |        [2;] |        [2;] |µs
+ * t_STATUS | [268; 3000] | [268; 1506] | [268; 1506] |µs
+ * t_CF2ST1 |    [; 3000] |    [; 1506] |    [; 1506] |µs
+ * t_CF2CK  |     [3010;] |     [1506;] |     [1506;] |µs
+ * t_ST2CK  |       [10;] |        [2;] |        [2;] |µs
+ * t_CD2UM  |  [175; 830] |  [175; 437] |  [175; 437] |µs
+ */
+static struct altera_ps_data c5_data = {
+	/* these values for Cyclone5 are compatible with Stratix5 */
+	.devtype = CYCLONE5,
+	.status_wait_min_us = 268,
+	.status_wait_max_us = 1506,
+	.t_cfg_us = 2,
+	.t_st2ck_us = 2,
+};
+
+static struct altera_ps_data a10_data = {
+	.devtype = ARRIA10,
+	.status_wait_min_us = 268,  /* min(t_STATUS) */
+	.status_wait_max_us = 3000, /* max(t_CF2ST1) */
+	.t_cfg_us = 2,    /* max { min(t_CFG), max(tCF2ST0) } */
+	.t_st2ck_us = 10, /* min(t_ST2CK) */
+};
+
+static const struct of_device_id of_ef_match[] = {
+	{ .compatible = "altr,fpga-passive-serial", .data = &c5_data },
+	{ .compatible = "altr,fpga-arria10-passive-serial", .data = &a10_data },
+	{}
+};
+MODULE_DEVICE_TABLE(of, of_ef_match);
+
+static enum fpga_mgr_states altera_ps_state(struct fpga_manager *mgr)
+{
+	struct altera_ps_conf *conf = mgr->priv;
+
+	if (gpiod_get_value_cansleep(conf->status))
+		return FPGA_MGR_STATE_RESET;
+
+	return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static inline void altera_ps_delay(int delay_us)
+{
+	if (delay_us > 10)
+		usleep_range(delay_us, delay_us + 5);
+	else
+		udelay(delay_us);
+}
+
+static int altera_ps_write_init(struct fpga_manager *mgr,
+				struct fpga_image_info *info,
+				const char *buf, size_t count)
+{
+	struct altera_ps_conf *conf = mgr->priv;
+	int min, max, waits;
+	int i;
+
+	conf->info_flags = info->flags;
+
+	if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
+		dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
+		return -EINVAL;
+	}
+
+	gpiod_set_value_cansleep(conf->config, 1);
+
+	/* wait min reset pulse time */
+	altera_ps_delay(conf->data->t_cfg_us);
+
+	if (!gpiod_get_value_cansleep(conf->status)) {
+		dev_err(&mgr->dev, "Status pin failed to show a reset\n");
+		return -EIO;
+	}
+
+	gpiod_set_value_cansleep(conf->config, 0);
+
+	min = conf->data->status_wait_min_us;
+	max = conf->data->status_wait_max_us;
+	waits = max / min;
+	if (max % min)
+		waits++;
+
+	/* wait for max { max(t_STATUS), max(t_CF2ST1) } */
+	for (i = 0; i < waits; i++) {
+		usleep_range(min, min + 10);
+		if (!gpiod_get_value_cansleep(conf->status)) {
+			/* wait for min(t_ST2CK)*/
+			altera_ps_delay(conf->data->t_st2ck_us);
+			return 0;
+		}
+	}
+
+	dev_err(&mgr->dev, "Status pin not ready.\n");
+	return -EIO;
+}
+
+static void rev_buf(char *buf, size_t len)
+{
+	u32 *fw32 = (u32 *)buf;
+	size_t extra_bytes = (len & 0x03);
+	const u32 *fw_end = (u32 *)(buf + len - extra_bytes);
+
+	/* set buffer to lsb first */
+	while (fw32 < fw_end) {
+		*fw32 = bitrev8x4(*fw32);
+		fw32++;
+	}
+
+	if (extra_bytes) {
+		buf = (char *)fw_end;
+		while (extra_bytes) {
+			*buf = bitrev8(*buf);
+			buf++;
+			extra_bytes--;
+		}
+	}
+}
+
+static int altera_ps_write(struct fpga_manager *mgr, const char *buf,
+			   size_t count)
+{
+	struct altera_ps_conf *conf = mgr->priv;
+	const char *fw_data = buf;
+	const char *fw_data_end = fw_data + count;
+
+	while (fw_data < fw_data_end) {
+		int ret;
+		size_t stride = min_t(size_t, fw_data_end - fw_data, SZ_4K);
+
+		if (!(conf->info_flags & FPGA_MGR_BITSTREAM_LSB_FIRST))
+			rev_buf((char *)fw_data, stride);
+
+		ret = spi_write(conf->spi, fw_data, stride);
+		if (ret) {
+			dev_err(&mgr->dev, "spi error in firmware write: %d\n",
+				ret);
+			return ret;
+		}
+		fw_data += stride;
+	}
+
+	return 0;
+}
+
+static int altera_ps_write_complete(struct fpga_manager *mgr,
+				    struct fpga_image_info *info)
+{
+	struct altera_ps_conf *conf = mgr->priv;
+	const char dummy[] = {0};
+	int ret;
+
+	if (gpiod_get_value_cansleep(conf->status)) {
+		dev_err(&mgr->dev, "Error during configuration.\n");
+		return -EIO;
+	}
+
+	if (!IS_ERR(conf->confd)) {
+		if (!gpiod_get_raw_value_cansleep(conf->confd)) {
+			dev_err(&mgr->dev, "CONF_DONE is inactive!\n");
+			return -EIO;
+		}
+	}
+
+	/*
+	 * After CONF_DONE goes high, send two additional falling edges on DCLK
+	 * to begin initialization and enter user mode
+	 */
+	ret = spi_write(conf->spi, dummy, 1);
+	if (ret) {
+		dev_err(&mgr->dev, "spi error during end sequence: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct fpga_manager_ops altera_ps_ops = {
+	.state = altera_ps_state,
+	.write_init = altera_ps_write_init,
+	.write = altera_ps_write,
+	.write_complete = altera_ps_write_complete,
+};
+
+static int altera_ps_probe(struct spi_device *spi)
+{
+	struct altera_ps_conf *conf;
+	const struct of_device_id *of_id;
+
+	conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL);
+	if (!conf)
+		return -ENOMEM;
+
+	of_id = of_match_device(of_ef_match, &spi->dev);
+	if (!of_id)
+		return -ENODEV;
+
+	conf->data = of_id->data;
+	conf->spi = spi;
+	conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_HIGH);
+	if (IS_ERR(conf->config)) {
+		dev_err(&spi->dev, "Failed to get config gpio: %ld\n",
+			PTR_ERR(conf->config));
+		return PTR_ERR(conf->config);
+	}
+
+	conf->status = devm_gpiod_get(&spi->dev, "nstat", GPIOD_IN);
+	if (IS_ERR(conf->status)) {
+		dev_err(&spi->dev, "Failed to get status gpio: %ld\n",
+			PTR_ERR(conf->status));
+		return PTR_ERR(conf->status);
+	}
+
+	conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN);
+	if (IS_ERR(conf->confd)) {
+		dev_warn(&spi->dev, "Not using confd gpio: %ld\n",
+			 PTR_ERR(conf->confd));
+	}
+
+	/* Register manager with unique name */
+	snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s %s",
+		 dev_driver_string(&spi->dev), dev_name(&spi->dev));
+
+	return fpga_mgr_register(&spi->dev, conf->mgr_name,
+				 &altera_ps_ops, conf);
+}
+
+static int altera_ps_remove(struct spi_device *spi)
+{
+	fpga_mgr_unregister(&spi->dev);
+
+	return 0;
+}
+
+static const struct spi_device_id altera_ps_spi_ids[] = {
+	{"cyclone-ps-spi", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, altera_ps_spi_ids);
+
+static struct spi_driver altera_ps_driver = {
+	.driver = {
+		.name = "altera-ps-spi",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(of_ef_match),
+	},
+	.id_table = altera_ps_spi_ids,
+	.probe = altera_ps_probe,
+	.remove = altera_ps_remove,
+};
+
+module_spi_driver(altera_ps_driver)
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Joshua Clayton <stillcompiling@gmail.com>");
+MODULE_DESCRIPTION("Module to load Altera FPGA firmware over SPI");

+ 2 - 2
drivers/fpga/fpga-region.c

@@ -319,8 +319,8 @@ static int child_regions_with_firmware(struct device_node *overlay)
 	of_node_put(child_region);
 
 	if (ret)
-		pr_err("firmware-name not allowed in child FPGA region: %s",
-		       child_region->full_name);
+		pr_err("firmware-name not allowed in child FPGA region: %pOF",
+		       child_region);
 
 	return ret;
 }
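
The %pOF change above uses the printk extension for device-tree nodes instead of dereferencing full_name directly, which keeps working as the of_node name representation evolves. A hedged one-line usage sketch, where np is a hypothetical node pointer:

/* Given any struct device_node *np, %pOF prints its full path. */
pr_info("child FPGA region: %pOF\n", np);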

+ 2 - 2
drivers/fsi/fsi-core.c

@@ -475,7 +475,7 @@ static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
 	return count;
 }
 
-static struct bin_attribute fsi_slave_raw_attr = {
+static const struct bin_attribute fsi_slave_raw_attr = {
 	.attr = {
 		.name = "raw",
 		.mode = 0600,
@@ -499,7 +499,7 @@ static ssize_t fsi_slave_sysfs_term_write(struct file *file,
 	return count;
 }
 
-static struct bin_attribute fsi_slave_term_attr = {
+static const struct bin_attribute fsi_slave_term_attr = {
 	.attr = {
 		.name = "term",
 		.mode = 0200,

+ 4 - 6
drivers/fsi/fsi-scom.c

@@ -57,12 +57,6 @@ static int put_scom(struct scom_device *scom_dev, uint64_t value,
 	int rc;
 	uint32_t data;
 
-	data = cpu_to_be32(SCOM_RESET_CMD);
-	rc = fsi_device_write(scom_dev->fsi_dev, SCOM_RESET_REG, &data,
-				sizeof(uint32_t));
-	if (rc)
-		return rc;
-
 	data = cpu_to_be32((value >> 32) & 0xffffffff);
 	rc = fsi_device_write(scom_dev->fsi_dev, SCOM_DATA0_REG, &data,
 				sizeof(uint32_t));
@@ -186,6 +180,7 @@ static const struct file_operations scom_fops = {
 
 static int scom_probe(struct device *dev)
 {
+	uint32_t data;
 	struct fsi_device *fsi_dev = to_fsi_dev(dev);
 	struct scom_device *scom;
 
@@ -202,6 +197,9 @@ static int scom_probe(struct device *dev)
 	scom->mdev.parent = dev;
 	list_add(&scom->link, &scom_devices);
 
+	data = cpu_to_be32(SCOM_RESET_CMD);
+	fsi_device_write(fsi_dev, SCOM_RESET_REG, &data, sizeof(uint32_t));
+
 	return misc_register(&scom->mdev);
 }
 

+ 14 - 0
drivers/hv/channel.c

@@ -177,6 +177,11 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 		      &vmbus_connection.chn_msg_list);
 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 
+	if (newchannel->rescind) {
+		err = -ENODEV;
+		goto error_free_gpadl;
+	}
+
 	ret = vmbus_post_msg(open_msg,
 			     sizeof(struct vmbus_channel_open_channel), true);
 
@@ -421,6 +426,11 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
 
 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 
+	if (channel->rescind) {
+		ret = -ENODEV;
+		goto cleanup;
+	}
+
 	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
 			     sizeof(*msginfo), true);
 	if (ret != 0)
@@ -494,6 +504,10 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
 	list_add_tail(&info->msglistentry,
 		      &vmbus_connection.chn_msg_list);
 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+	if (channel->rescind)
+		goto post_msg_err;
+
 	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
 			     true);
 

+ 26 - 3
drivers/hv/channel_mgmt.c

@@ -451,6 +451,12 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	/* Make sure this is a new offer */
 	mutex_lock(&vmbus_connection.channel_mutex);
 
+	/*
+	 * Now that we have acquired the channel_mutex,
+	 * we can release the potentially racing rescind thread.
+	 */
+	atomic_dec(&vmbus_connection.offer_in_progress);
+
 	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
 		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
 			newchannel->offermsg.offer.if_type) &&
@@ -481,7 +487,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 			channel->num_sc++;
 			spin_unlock_irqrestore(&channel->lock, flags);
 		} else {
-			atomic_dec(&vmbus_connection.offer_in_progress);
 			goto err_free_chan;
 		}
 	}
@@ -510,7 +515,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	if (!fnew) {
 		if (channel->sc_creation_callback != NULL)
 			channel->sc_creation_callback(newchannel);
-		atomic_dec(&vmbus_connection.offer_in_progress);
 		return;
 	}
 
@@ -541,7 +545,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 		goto err_deq_chan;
 	}
 
-	atomic_dec(&vmbus_connection.offer_in_progress);
+	newchannel->probe_done = true;
 	return;
 
 err_deq_chan:
@@ -882,8 +886,27 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 	channel->rescind = true;
 	spin_unlock_irqrestore(&channel->lock, flags);
 
+	/*
+	 * Now that we have posted the rescind state, perform
+	 * rescind related cleanup.
+	 */
 	vmbus_rescind_cleanup(channel);
 
+	/*
+	 * Now wait for offer handling to complete.
+	 */
+	while (READ_ONCE(channel->probe_done) == false) {
+		/*
+		 * We wait here until any channel offer is currently
+		 * being processed.
+		 */
+		msleep(1);
+	}
+
+	/*
+	 * At this point, the rescind handling can proceed safely.
+	 */
+
 	if (channel->device_obj) {
 		if (channel->chn_rescind_callback) {
 			channel->chn_rescind_callback(channel);
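
The loop added above is the classic publish/poll handshake: vmbus_process_offer() publishes probe_done = true, and the rescind path polls it with READ_ONCE() plus msleep() so the load is refetched on every pass. A generic sketch of the same shape:

#include <linux/compiler.h>
#include <linux/delay.h>

/* Sketch: wait until another thread publishes *flag = true. */
static void wait_for_published_flag(const bool *flag)
{
	while (!READ_ONCE(*flag))
		msleep(1);	/* sleep, don't burn the CPU */
}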

+ 6 - 6
drivers/hv/hv_balloon.c

@@ -584,10 +584,6 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
 
 	switch (val) {
 	case MEM_ONLINE:
-		spin_lock_irqsave(&dm_device.ha_lock, flags);
-		dm_device.num_pages_onlined += mem->nr_pages;
-		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
-		/* Fall through */
 	case MEM_CANCEL_ONLINE:
 		if (dm_device.ha_waiting) {
 			dm_device.ha_waiting = false;
@@ -644,6 +640,9 @@ static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
 	__online_page_set_limits(pg);
 	__online_page_increment_counters(pg);
 	__online_page_free(pg);
+
+	WARN_ON_ONCE(!spin_is_locked(&dm_device.ha_lock));
+	dm_device.num_pages_onlined++;
 }
 
 static void hv_bring_pgs_online(struct hv_hotadd_state *has,
@@ -1036,8 +1035,8 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
 		if (info_hdr->data_size == sizeof(__u64)) {
 			__u64 *max_page_count = (__u64 *)&info_hdr[1];
 
-			pr_info("INFO_TYPE_MAX_PAGE_CNT = %llu\n",
-				*max_page_count);
+			pr_info("Max. dynamic memory size: %llu MB\n",
+				(*max_page_count) >> (20 - PAGE_SHIFT));
 		}
 
 		break;
@@ -1656,6 +1655,7 @@ static int balloon_probe(struct hv_device *dev,
 	}
 
 	dm_device.state = DM_INITIALIZED;
+	last_post_time = jiffies;
 
 	return 0;
 

+ 1 - 1
drivers/hv/hv_kvp.c

@@ -304,7 +304,7 @@ static int process_ob_ipinfo(void *in_msg, void *out_msg, int op)
 				strlen((char *)in->body.kvp_ip_val.adapter_id),
 				UTF16_HOST_ENDIAN,
 				(wchar_t *)out->kvp_ip_val.adapter_id,
-				MAX_IP_ADDR_SIZE);
+				MAX_ADAPTER_ID_SIZE);
 		if (len < 0)
 			return len;
 

+ 60 - 109
drivers/hv/ring_buffer.c

@@ -29,6 +29,7 @@
 #include <linux/uio.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 
 #include "hyperv_vmbus.h"
 
@@ -94,30 +95,6 @@ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
 	ring_info->ring_buffer->write_index = next_write_location;
 }
 
-/* Get the next read location for the specified ring buffer. */
-static inline u32
-hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
-{
-	return ring_info->ring_buffer->read_index;
-}
-
-/*
- * Get the next read location + offset for the specified ring buffer.
- * This allows the caller to skip.
- */
-static inline u32
-hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
-				    u32 offset)
-{
-	u32 next = ring_info->ring_buffer->read_index;
-
-	next += offset;
-	if (next >= ring_info->ring_datasize)
-		next -= ring_info->ring_datasize;
-
-	return next;
-}
-
 /* Set the next read location for the specified ring buffer. */
 static inline void
 hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
@@ -141,29 +118,6 @@ hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
 	return (u64)ring_info->ring_buffer->write_index << 32;
 }
 
-/*
- * Helper routine to copy to source from ring buffer.
- * Assume there is enough room. Handles wrap-around in src case only!!
- */
-static u32 hv_copyfrom_ringbuffer(
-	const struct hv_ring_buffer_info *ring_info,
-	void				*dest,
-	u32				destlen,
-	u32				start_read_offset)
-{
-	void *ring_buffer = hv_get_ring_buffer(ring_info);
-	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
-
-	memcpy(dest, ring_buffer + start_read_offset, destlen);
-
-	start_read_offset += destlen;
-	if (start_read_offset >= ring_buffer_size)
-		start_read_offset -= ring_buffer_size;
-
-	return start_read_offset;
-}
-
-
 /*
  * Helper routine to copy from source to ring buffer.
  * Assume there is enough room. Handles wrap-around in dest case only!!
@@ -334,33 +288,22 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
 	return 0;
 }
 
-static inline void
-init_cached_read_index(struct hv_ring_buffer_info *rbi)
-{
-	rbi->cached_read_index = rbi->ring_buffer->read_index;
-}
-
 int hv_ringbuffer_read(struct vmbus_channel *channel,
 		       void *buffer, u32 buflen, u32 *buffer_actual_len,
 		       u64 *requestid, bool raw)
 {
-	u32 bytes_avail_toread;
-	u32 next_read_location;
-	u64 prev_indices = 0;
-	struct vmpacket_descriptor desc;
-	u32 offset;
-	u32 packetlen;
-	struct hv_ring_buffer_info *inring_info = &channel->inbound;
-
-	if (buflen <= 0)
+	struct vmpacket_descriptor *desc;
+	u32 packetlen, offset;
+
+	if (unlikely(buflen == 0))
 		return -EINVAL;
 
 	*buffer_actual_len = 0;
 	*requestid = 0;
 
-	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
 	/* Make sure there is something to read */
-	if (bytes_avail_toread < sizeof(desc)) {
+	desc = hv_pkt_iter_first(channel);
+	if (desc == NULL) {
 		/*
 		 * No error is set when there is even no header, drivers are
 		 * supposed to analyze buffer_actual_len.
@@ -368,48 +311,22 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
 		return 0;
 	}
 
-	init_cached_read_index(inring_info);
-
-	next_read_location = hv_get_next_read_location(inring_info);
-	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
-						    sizeof(desc),
-						    next_read_location);
-
-	offset = raw ? 0 : (desc.offset8 << 3);
-	packetlen = (desc.len8 << 3) - offset;
+	offset = raw ? 0 : (desc->offset8 << 3);
+	packetlen = (desc->len8 << 3) - offset;
 	*buffer_actual_len = packetlen;
-	*requestid = desc.trans_id;
-
-	if (bytes_avail_toread < packetlen + offset)
-		return -EAGAIN;
+	*requestid = desc->trans_id;
 
-	if (packetlen > buflen)
+	if (unlikely(packetlen > buflen))
 		return -ENOBUFS;
 
-	next_read_location =
-		hv_get_next_readlocation_withoffset(inring_info, offset);
+	/* since ring is double mapped, only one copy is necessary */
+	memcpy(buffer, (const char *)desc + offset, packetlen);
 
-	next_read_location = hv_copyfrom_ringbuffer(inring_info,
-						buffer,
-						packetlen,
-						next_read_location);
+	/* Advance ring index to next packet descriptor */
+	__hv_pkt_iter_next(channel, desc);
 
-	next_read_location = hv_copyfrom_ringbuffer(inring_info,
-						&prev_indices,
-						sizeof(u64),
-						next_read_location);
-
-	/*
-	 * Make sure all reads are done before we update the read index since
-	 * the writer may start writing to the read area once the read index
-	 * is updated.
-	 */
-	virt_mb();
-
-	/* Update the read index */
-	hv_set_next_read_location(inring_info, next_read_location);
-
-	hv_signal_on_read(channel);
+	/* Notify host of update */
+	hv_pkt_iter_close(channel);
 
 	return 0;
 }
@@ -440,14 +357,16 @@ static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
 struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
 {
 	struct hv_ring_buffer_info *rbi = &channel->inbound;
-
-	/* set state for later hv_signal_on_read() */
-	init_cached_read_index(rbi);
+	struct vmpacket_descriptor *desc;
 
 	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
 		return NULL;
 
-	return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
+	desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
+	if (desc)
+		prefetch((char *)desc + (desc->len8 << 3));
+
+	return desc;
 }
 EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
 
@@ -471,10 +390,7 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
 		rbi->priv_read_index -= dsize;
 
 	/* more data? */
-	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
-		return NULL;
-	else
-		return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
+	return hv_pkt_iter_first(channel);
 }
 EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
 
@@ -484,6 +400,7 @@ EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
 void hv_pkt_iter_close(struct vmbus_channel *channel)
 {
 	struct hv_ring_buffer_info *rbi = &channel->inbound;
+	u32 orig_write_sz = hv_get_bytes_to_write(rbi);
 
 	/*
 	 * Make sure all reads are done before we update the read index since
@@ -493,6 +410,40 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
 	virt_rmb();
 	rbi->ring_buffer->read_index = rbi->priv_read_index;
 
-	hv_signal_on_read(channel);
+	/*
+	 * Issue a full memory barrier before making the signaling decision.
+	 * Here is the reason for having this barrier:
+	 * If the reading of the pend_sz (in this function)
+	 * were to be reordered and read before we commit the new read
+	 * index (in the calling function)  we could
+	 * have a problem. If the host were to set the pending_sz after we
+	 * have sampled pending_sz and go to sleep before we commit the
+	 * read index, we could miss sending the interrupt. Issue a full
+	 * memory barrier to address this.
+	 */
+	virt_mb();
+
+	/* If host has disabled notifications then skip */
+	if (rbi->ring_buffer->interrupt_mask)
+		return;
+
+	if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) {
+		u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
+
+		/*
+		 * If there was space before we began iteration,
+		 * then host was not blocked. Also handles case where
+		 * pending_sz is zero then host has nothing pending
+		 * and does not need to be signaled.
+		 */
+		if (orig_write_sz > pending_sz)
+			return;
+
+		/* If pending write will not fit, don't give false hope. */
+		if (hv_get_bytes_to_write(rbi) < pending_sz)
+			return;
+	}
+
+	vmbus_setevent(channel);
 }
 EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
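
With the rework above, hv_pkt_iter_first()/__hv_pkt_iter_next()/hv_pkt_iter_close() form a complete read-side iterator: first/next return descriptors in place (no copy), and close commits the read index and decides whether to signal the host. A consumer sketch built only from these exported functions (handle_pkt() is a hypothetical handler):

static void drain_inbound(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *desc;

	for (desc = hv_pkt_iter_first(channel);
	     desc != NULL;
	     desc = __hv_pkt_iter_next(channel, desc)) {
		/* payload starts desc->offset8 << 3 bytes into the packet */
		handle_pkt(desc);	/* hypothetical per-packet handler */
	}

	/* publish read_index and signal the host if it is waiting */
	hv_pkt_iter_close(channel);
}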

+ 3 - 0
drivers/hv/vmbus_drv.c

@@ -940,6 +940,9 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
 			if (channel->offermsg.child_relid != relid)
 				continue;
 
+			if (channel->rescind)
+				continue;
+
 			switch (channel->callback_mode) {
 			case HV_CALL_ISR:
 				vmbus_channel_isr(channel);

+ 5 - 5
drivers/hwtracing/coresight/Kconfig

@@ -70,13 +70,13 @@ config CORESIGHT_SOURCE_ETM4X
 	  for instruction level tracing. Depending on the implemented version
 	  data tracing may also be available.
 
-config CORESIGHT_QCOM_REPLICATOR
-	bool "Qualcomm CoreSight Replicator driver"
+config CORESIGHT_DYNAMIC_REPLICATOR
+	bool "CoreSight Programmable Replicator driver"
 	depends on CORESIGHT_LINKS_AND_SINKS
 	help
-	  This enables support for Qualcomm CoreSight link driver. The
-	  programmable ATB replicator sends the ATB trace stream from the
-	  ETB/ETF to the TPIUi and ETR.
+	  This enables support for dynamic CoreSight replicator link driver.
+	  The programmable ATB replicator allows independent filtering of the
+	  trace data based on the traceid.
 
 config CORESIGHT_STM
 	bool "CoreSight System Trace Macrocell driver"

+ 1 - 1
drivers/hwtracing/coresight/Makefile

@@ -14,6 +14,6 @@ obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \
 					coresight-etm3x-sysfs.o
 obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
 					coresight-etm4x-sysfs.o
-obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
+obj-$(CONFIG_CORESIGHT_DYNAMIC_REPLICATOR) += coresight-dynamic-replicator.o
 obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
 obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o

+ 1 - 1
drivers/hwtracing/coresight/coresight-cpu-debug.c

@@ -667,7 +667,7 @@ static int debug_remove(struct amba_device *adev)
 	return 0;
 }
 
-static struct amba_id debug_ids[] = {
+static const struct amba_id debug_ids[] = {
 	{       /* Debug for Cortex-A53 */
 		.id	= 0x000bbd03,
 		.mask	= 0x000fffff,

+ 30 - 4
drivers/hwtracing/coresight/coresight-replicator-qcom.c → drivers/hwtracing/coresight/coresight-dynamic-replicator.c

@@ -95,6 +95,28 @@ static const struct coresight_ops replicator_cs_ops = {
 	.link_ops	= &replicator_link_ops,
 };
 
+#define coresight_replicator_reg(name, offset) \
+	coresight_simple_reg32(struct replicator_state, name, offset)
+
+coresight_replicator_reg(idfilter0, REPLICATOR_IDFILTER0);
+coresight_replicator_reg(idfilter1, REPLICATOR_IDFILTER1);
+
+static struct attribute *replicator_mgmt_attrs[] = {
+	&dev_attr_idfilter0.attr,
+	&dev_attr_idfilter1.attr,
+	NULL,
+};
+
+static const struct attribute_group replicator_mgmt_group = {
+	.attrs = replicator_mgmt_attrs,
+	.name = "mgmt",
+};
+
+static const struct attribute_group *replicator_groups[] = {
+	&replicator_mgmt_group,
+	NULL,
+};
+
 static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
 {
 	int ret;
@@ -139,11 +161,11 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
 	desc.ops = &replicator_cs_ops;
 	desc.pdata = adev->dev.platform_data;
 	desc.dev = &adev->dev;
+	desc.groups = replicator_groups;
 	drvdata->csdev = coresight_register(&desc);
 	if (IS_ERR(drvdata->csdev))
 		return PTR_ERR(drvdata->csdev);
 
-	dev_info(dev, "%s initialized\n", (char *)id->data);
 	return 0;
 }
 
@@ -175,18 +197,22 @@ static const struct dev_pm_ops replicator_dev_pm_ops = {
 			   NULL)
 };
 
-static struct amba_id replicator_ids[] = {
+static const struct amba_id replicator_ids[] = {
 	{
 		.id     = 0x0003b909,
 		.mask   = 0x0003ffff,
-		.data	= "REPLICATOR 1.0",
+	},
+	{
+		/* Coresight SoC-600 */
+		.id     = 0x000bb9ec,
+		.mask   = 0x000fffff,
 	},
 	{ 0, 0 },
 };
 
 static struct amba_driver replicator_driver = {
 	.drv = {
-		.name	= "coresight-replicator-qcom",
+		.name	= "coresight-dynamic-replicator",
 		.pm	= &replicator_dev_pm_ops,
 		.suppress_bind_attrs = true,
 	},

+ 43 - 25
drivers/hwtracing/coresight/coresight-etb10.c

@@ -200,8 +200,10 @@ static void etb_disable_hw(struct etb_drvdata *drvdata)
 
 static void etb_dump_hw(struct etb_drvdata *drvdata)
 {
+	bool lost = false;
 	int i;
 	u8 *buf_ptr;
+	const u32 *barrier;
 	u32 read_data, depth;
 	u32 read_ptr, write_ptr;
 	u32 frame_off, frame_endoff;
@@ -223,20 +225,26 @@ static void etb_dump_hw(struct etb_drvdata *drvdata)
 	}
 
 	if ((readl_relaxed(drvdata->base + ETB_STATUS_REG)
-		      & ETB_STATUS_RAM_FULL) == 0)
+		      & ETB_STATUS_RAM_FULL) == 0) {
 		writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
-	else
+	} else {
 		writel_relaxed(write_ptr, drvdata->base + ETB_RAM_READ_POINTER);
+		lost = true;
+	}
 
 	depth = drvdata->buffer_depth;
 	buf_ptr = drvdata->buf;
+	barrier = barrier_pkt;
 	for (i = 0; i < depth; i++) {
 		read_data = readl_relaxed(drvdata->base +
 					  ETB_RAM_READ_DATA_REG);
-		*buf_ptr++ = read_data >> 0;
-		*buf_ptr++ = read_data >> 8;
-		*buf_ptr++ = read_data >> 16;
-		*buf_ptr++ = read_data >> 24;
+		if (lost && *barrier) {
+			read_data = *barrier;
+			barrier++;
+		}
+
+		*(u32 *)buf_ptr = read_data;
+		buf_ptr += 4;
 	}
 
 	if (frame_off) {
@@ -353,8 +361,10 @@ static void etb_update_buffer(struct coresight_device *csdev,
 			      struct perf_output_handle *handle,
 			      void *sink_config)
 {
+	bool lost = false;
 	int i, cur;
 	u8 *buf_ptr;
+	const u32 *barrier;
 	u32 read_ptr, write_ptr, capacity;
 	u32 status, read_data, to_read;
 	unsigned long offset;
@@ -366,8 +376,8 @@ static void etb_update_buffer(struct coresight_device *csdev,
 
 	capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
 
-	CS_UNLOCK(drvdata->base);
 	etb_disable_hw(drvdata);
+	CS_UNLOCK(drvdata->base);
 
 	/* unit is in words, not bytes */
 	read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
@@ -384,7 +394,7 @@ static void etb_update_buffer(struct coresight_device *csdev,
 			(unsigned long)write_ptr);
 
 		write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
-		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+		lost = true;
 	}
 
 	/*
@@ -395,7 +405,7 @@ static void etb_update_buffer(struct coresight_device *csdev,
 	 */
 	status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
 	if (status & ETB_STATUS_RAM_FULL) {
-		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+		lost = true;
 		to_read = capacity;
 		read_ptr = write_ptr;
 	} else {
@@ -428,22 +438,30 @@ static void etb_update_buffer(struct coresight_device *csdev,
 		if (read_ptr > (drvdata->buffer_depth - 1))
 			read_ptr -= drvdata->buffer_depth;
 		/* let the decoder know we've skipped ahead */
-		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+		lost = true;
 	}
 
+	if (lost)
+		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+
 	/* finally tell HW where we want to start reading from */
 	writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);
 
 	cur = buf->cur;
 	offset = buf->offset;
+	barrier = barrier_pkt;
+
 	for (i = 0; i < to_read; i += 4) {
 		buf_ptr = buf->data_pages[cur] + offset;
 		read_data = readl_relaxed(drvdata->base +
 					  ETB_RAM_READ_DATA_REG);
-		*buf_ptr++ = read_data >> 0;
-		*buf_ptr++ = read_data >> 8;
-		*buf_ptr++ = read_data >> 16;
-		*buf_ptr++ = read_data >> 24;
+		if (lost && *barrier) {
+			read_data = *barrier;
+			barrier++;
+		}
+
+		*(u32 *)buf_ptr = read_data;
+		buf_ptr += 4;
 
 		offset += 4;
 		if (offset >= PAGE_SIZE) {
@@ -557,17 +575,17 @@ static const struct file_operations etb_fops = {
 	.llseek		= no_llseek,
 };
 
-#define coresight_etb10_simple_func(name, offset)                       \
-	coresight_simple_func(struct etb_drvdata, NULL, name, offset)
+#define coresight_etb10_reg(name, offset)		\
+	coresight_simple_reg32(struct etb_drvdata, name, offset)
 
-coresight_etb10_simple_func(rdp, ETB_RAM_DEPTH_REG);
-coresight_etb10_simple_func(sts, ETB_STATUS_REG);
-coresight_etb10_simple_func(rrp, ETB_RAM_READ_POINTER);
-coresight_etb10_simple_func(rwp, ETB_RAM_WRITE_POINTER);
-coresight_etb10_simple_func(trg, ETB_TRG);
-coresight_etb10_simple_func(ctl, ETB_CTL_REG);
-coresight_etb10_simple_func(ffsr, ETB_FFSR);
-coresight_etb10_simple_func(ffcr, ETB_FFCR);
+coresight_etb10_reg(rdp, ETB_RAM_DEPTH_REG);
+coresight_etb10_reg(sts, ETB_STATUS_REG);
+coresight_etb10_reg(rrp, ETB_RAM_READ_POINTER);
+coresight_etb10_reg(rwp, ETB_RAM_WRITE_POINTER);
+coresight_etb10_reg(trg, ETB_TRG);
+coresight_etb10_reg(ctl, ETB_CTL_REG);
+coresight_etb10_reg(ffsr, ETB_FFSR);
+coresight_etb10_reg(ffcr, ETB_FFCR);
 
 static struct attribute *coresight_etb_mgmt_attrs[] = {
 	&dev_attr_rdp.attr,
@@ -728,7 +746,7 @@ static const struct dev_pm_ops etb_dev_pm_ops = {
 	SET_RUNTIME_PM_OPS(etb_runtime_suspend, etb_runtime_resume, NULL)
 };
 
-static struct amba_id etb_ids[] = {
+static const struct amba_id etb_ids[] = {
 	{
 		.id	= 0x0003b907,
 		.mask	= 0x0003ffff,
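
Both ETB loops above splice barrier packets into the trace data once an overrun is detected: while *barrier is non-zero, each word read from the RAM is replaced by the next barrier word. Assuming barrier_pkt's final element is zero (its contents are not shown in this diff), the substitution step can be factored as in this sketch:

/* Sketch of the substitution step used in both loops above. */
static u32 maybe_insert_barrier(bool lost, const u32 **barrier, u32 data)
{
	if (lost && **barrier) {
		data = **barrier;
		(*barrier)++;	/* stops once the zero terminator is reached */
	}
	return data;
}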

+ 3 - 1
drivers/hwtracing/coresight/coresight-etm-perf.c

@@ -53,14 +53,16 @@ static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
 /* ETMv3.5/PTM's ETMCR is 'config' */
 PMU_FORMAT_ATTR(cycacc,		"config:" __stringify(ETM_OPT_CYCACC));
 PMU_FORMAT_ATTR(timestamp,	"config:" __stringify(ETM_OPT_TS));
+PMU_FORMAT_ATTR(retstack,	"config:" __stringify(ETM_OPT_RETSTK));
 
 static struct attribute *etm_config_formats_attr[] = {
 	&format_attr_cycacc.attr,
 	&format_attr_timestamp.attr,
+	&format_attr_retstack.attr,
 	NULL,
 };
 
-static struct attribute_group etm_pmu_format_group = {
+static const struct attribute_group etm_pmu_format_group = {
 	.name   = "format",
 	.attrs  = etm_config_formats_attr,
 };

+ 1 - 0
drivers/hwtracing/coresight/coresight-etm.h

@@ -106,6 +106,7 @@
 #define ETMTECR1_START_STOP	BIT(25)
 /* ETMCCER - 0x1E8 */
 #define ETMCCER_TIMESTAMP	BIT(22)
+#define ETMCCER_RETSTACK	BIT(23)
 
 #define ETM_MODE_EXCLUDE	BIT(0)
 #define ETM_MODE_CYCACC		BIT(1)

+ 13 - 13
drivers/hwtracing/coresight/coresight-etm3x-sysfs.c

@@ -1232,19 +1232,19 @@ static struct attribute *coresight_etm_attrs[] = {
 	NULL,
 };
 
-#define coresight_etm3x_simple_func(name, offset)			\
-	coresight_simple_func(struct etm_drvdata, NULL, name, offset)
-
-coresight_etm3x_simple_func(etmccr, ETMCCR);
-coresight_etm3x_simple_func(etmccer, ETMCCER);
-coresight_etm3x_simple_func(etmscr, ETMSCR);
-coresight_etm3x_simple_func(etmidr, ETMIDR);
-coresight_etm3x_simple_func(etmcr, ETMCR);
-coresight_etm3x_simple_func(etmtraceidr, ETMTRACEIDR);
-coresight_etm3x_simple_func(etmteevr, ETMTEEVR);
-coresight_etm3x_simple_func(etmtssvr, ETMTSSCR);
-coresight_etm3x_simple_func(etmtecr1, ETMTECR1);
-coresight_etm3x_simple_func(etmtecr2, ETMTECR2);
+#define coresight_etm3x_reg(name, offset)			\
+	coresight_simple_reg32(struct etm_drvdata, name, offset)
+
+coresight_etm3x_reg(etmccr, ETMCCR);
+coresight_etm3x_reg(etmccer, ETMCCER);
+coresight_etm3x_reg(etmscr, ETMSCR);
+coresight_etm3x_reg(etmidr, ETMIDR);
+coresight_etm3x_reg(etmcr, ETMCR);
+coresight_etm3x_reg(etmtraceidr, ETMTRACEIDR);
+coresight_etm3x_reg(etmteevr, ETMTEEVR);
+coresight_etm3x_reg(etmtssvr, ETMTSSCR);
+coresight_etm3x_reg(etmtecr1, ETMTECR1);
+coresight_etm3x_reg(etmtecr2, ETMTECR2);
 
 static struct attribute *coresight_etm_mgmt_attrs[] = {
 	&dev_attr_etmccr.attr,

+ 18 - 4
drivers/hwtracing/coresight/coresight-etm3x.c

@@ -243,6 +243,8 @@ void etm_set_default(struct etm_config *config)
 	}
 
 	config->ctxid_mask = 0x0;
+	/* Setting default to 1024 as per TRM recommendation */
+	config->sync_freq = 0x400;
 }
 
 void etm_config_trace_mode(struct etm_config *config)
@@ -308,7 +310,9 @@ void etm_config_trace_mode(struct etm_config *config)
 	config->addr_type[1] = ETM_ADDR_TYPE_RANGE;
 }
 
-#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN)
+#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | \
+				 ETMCR_TIMESTAMP_EN | \
+				 ETMCR_RETURN_STACK)
 
 static int etm_parse_event_config(struct etm_drvdata *drvdata,
 				  struct perf_event *event)
@@ -339,14 +343,24 @@ static int etm_parse_event_config(struct etm_drvdata *drvdata,
 		etm_config_trace_mode(config);
 
 	/*
-	 * At this time only cycle accurate and timestamp options are
-	 * available.
+	 * At this time only cycle accurate, return stack  and timestamp
+	 * options are available.
 	 */
 	if (attr->config & ~ETM3X_SUPPORTED_OPTIONS)
 		return -EINVAL;
 
 	config->ctrl = attr->config;
 
+	/*
+	 * Possible to have cores with PTM (supports ret stack) and ETM
+	 * (never has ret stack) on the same SoC. So if we have a request
+	 * for return stack that can't be honoured on this core then
+	 * clear the bit - trace will still continue normally
+	 */
+	if ((config->ctrl & ETMCR_RETURN_STACK) &&
+	    !(drvdata->etmccer & ETMCCER_RETSTACK))
+		config->ctrl &= ~ETMCR_RETURN_STACK;
+
 	return 0;
 }
 
@@ -885,7 +899,7 @@ static const struct dev_pm_ops etm_dev_pm_ops = {
 	SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
 };
 
-static struct amba_id etm_ids[] = {
+static const struct amba_id etm_ids[] = {
 	{	/* ETM 3.3 */
 		.id	= 0x0003b921,
 		.mask	= 0x0003ffff,

+ 12 - 12
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c

@@ -2066,23 +2066,23 @@ static u32 etmv4_cross_read(const struct device *dev, u32 offset)
 	return reg.data;
 }
 
-#define coresight_etm4x_simple_func(name, offset)			\
-	coresight_simple_func(struct etmv4_drvdata, NULL, name, offset)
+#define coresight_etm4x_reg(name, offset)			\
+	coresight_simple_reg32(struct etmv4_drvdata, name, offset)
 
 #define coresight_etm4x_cross_read(name, offset)			\
 	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read,	\
 			      name, offset)
 
-coresight_etm4x_simple_func(trcpdcr, TRCPDCR);
-coresight_etm4x_simple_func(trcpdsr, TRCPDSR);
-coresight_etm4x_simple_func(trclsr, TRCLSR);
-coresight_etm4x_simple_func(trcauthstatus, TRCAUTHSTATUS);
-coresight_etm4x_simple_func(trcdevid, TRCDEVID);
-coresight_etm4x_simple_func(trcdevtype, TRCDEVTYPE);
-coresight_etm4x_simple_func(trcpidr0, TRCPIDR0);
-coresight_etm4x_simple_func(trcpidr1, TRCPIDR1);
-coresight_etm4x_simple_func(trcpidr2, TRCPIDR2);
-coresight_etm4x_simple_func(trcpidr3, TRCPIDR3);
+coresight_etm4x_reg(trcpdcr, TRCPDCR);
+coresight_etm4x_reg(trcpdsr, TRCPDSR);
+coresight_etm4x_reg(trclsr, TRCLSR);
+coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
+coresight_etm4x_reg(trcdevid, TRCDEVID);
+coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
+coresight_etm4x_reg(trcpidr0, TRCPIDR0);
+coresight_etm4x_reg(trcpidr1, TRCPIDR1);
+coresight_etm4x_reg(trcpidr2, TRCPIDR2);
+coresight_etm4x_reg(trcpidr3, TRCPIDR3);
 coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
 coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
 coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);

+ 5 - 1
drivers/hwtracing/coresight/coresight-etm4x.c

@@ -224,6 +224,10 @@ static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
 	if (attr->config & BIT(ETM_OPT_TS))
 		/* bit[11], Global timestamp tracing bit */
 		config->cfg |= BIT(11);
+	/* return stack - enable if selected and supported */
+	if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
+		/* bit[12], Return stack enable bit */
+		config->cfg |= BIT(12);
 
 out:
 	return ret;
@@ -1048,7 +1052,7 @@ err_arch_supported:
 	return ret;
 }
 
-static struct amba_id etm4_ids[] = {
+static const struct amba_id etm4_ids[] = {
 	{       /* ETM 4.0 - Cortex-A53  */
 		.id	= 0x000bb95d,
 		.mask	= 0x000fffff,

+ 6 - 1
drivers/hwtracing/coresight/coresight-funnel.c

@@ -246,11 +246,16 @@ static const struct dev_pm_ops funnel_dev_pm_ops = {
 	SET_RUNTIME_PM_OPS(funnel_runtime_suspend, funnel_runtime_resume, NULL)
 };
 
-static struct amba_id funnel_ids[] = {
+static const struct amba_id funnel_ids[] = {
 	{
 		.id     = 0x0003b908,
 		.mask   = 0x0003ffff,
 	},
+	{
+		/* Coresight SoC-600 */
+		.id     = 0x000bb9eb,
+		.mask   = 0x000fffff,
+	},
 	{ 0, 0},
 };
 

+ 34 - 5
drivers/hwtracing/coresight/coresight-priv.h

@@ -39,23 +39,33 @@
 #define ETM_MODE_EXCL_USER	BIT(31)
 
 typedef u32 (*coresight_read_fn)(const struct device *, u32 offset);
-#define coresight_simple_func(type, func, name, offset)			\
+#define __coresight_simple_func(type, func, name, lo_off, hi_off)	\
 static ssize_t name##_show(struct device *_dev,				\
 			   struct device_attribute *attr, char *buf)	\
 {									\
 	type *drvdata = dev_get_drvdata(_dev->parent);			\
 	coresight_read_fn fn = func;					\
-	u32 val;							\
+	u64 val;							\
 	pm_runtime_get_sync(_dev->parent);				\
 	if (fn)								\
-		val = fn(_dev->parent, offset);				\
+		val = (u64)fn(_dev->parent, lo_off);			\
 	else								\
-		val = readl_relaxed(drvdata->base + offset);		\
+		val = coresight_read_reg_pair(drvdata->base,		\
+						 lo_off, hi_off);	\
 	pm_runtime_put_sync(_dev->parent);				\
-	return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);		\
+	return scnprintf(buf, PAGE_SIZE, "0x%llx\n", val);		\
 }									\
 static DEVICE_ATTR_RO(name)
 
+#define coresight_simple_func(type, func, name, offset)			\
+	__coresight_simple_func(type, func, name, offset, -1)
+#define coresight_simple_reg32(type, name, offset)			\
+	__coresight_simple_func(type, NULL, name, offset, -1)
+#define coresight_simple_reg64(type, name, lo_off, hi_off)		\
+	__coresight_simple_func(type, NULL, name, lo_off, hi_off)
+
+extern const u32 barrier_pkt[5];
+
 enum etm_addr_type {
 	ETM_ADDR_TYPE_NONE,
 	ETM_ADDR_TYPE_SINGLE,
@@ -106,6 +116,25 @@ static inline void CS_UNLOCK(void __iomem *addr)
 	} while (0);
 }
 
+static inline u64
+coresight_read_reg_pair(void __iomem *addr, s32 lo_offset, s32 hi_offset)
+{
+	u64 val;
+
+	val = readl_relaxed(addr + lo_offset);
+	val |= (hi_offset < 0) ? 0 :
+	       (u64)readl_relaxed(addr + hi_offset) << 32;
+	return val;
+}
+
+static inline void coresight_write_reg_pair(void __iomem *addr, u64 val,
+						 s32 lo_offset, s32 hi_offset)
+{
+	writel_relaxed((u32)val, addr + lo_offset);
+	if (hi_offset >= 0)
+		writel_relaxed((u32)(val >> 32), addr + hi_offset);
+}
+
 void coresight_disable_path(struct list_head *path);
 int coresight_enable_path(struct list_head *path, u32 mode);
 struct coresight_device *coresight_get_sink(struct list_head *path);
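
The register-pair helpers above let one show() macro serve both widths: hi_offset < 0 degrades coresight_read_reg_pair() to a plain 32-bit read, which is exactly how coresight_simple_reg32() is built. A hedged usage sketch for a 64-bit register follows; the offsets and the attribute name are illustrative, not taken from this diff:

/* Expands to an example_rwp_show() printing the combined 64-bit value. */
#define EXAMPLE_RWP	0x018	/* low 32 bits, illustrative offset */
#define EXAMPLE_RWPHI	0x03c	/* high 32 bits, illustrative offset */

coresight_simple_reg64(struct tmc_drvdata, example_rwp,
		       EXAMPLE_RWP, EXAMPLE_RWPHI);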

+ 25 - 24
drivers/hwtracing/coresight/coresight-stm.c

@@ -276,7 +276,7 @@ static void stm_disable(struct coresight_device *csdev,
 		spin_unlock(&drvdata->spinlock);
 
 		/* Wait until the engine has completely stopped */
-		coresight_timeout(drvdata, STMTCSR, STMTCSR_BUSY_BIT, 0);
+		coresight_timeout(drvdata->base, STMTCSR, STMTCSR_BUSY_BIT, 0);
 
 		pm_runtime_put(drvdata->dev);
 
@@ -307,7 +307,8 @@ static inline bool stm_addr_unaligned(const void *addr, u8 write_bytes)
 	return ((unsigned long)addr & (write_bytes - 1));
 }
 
-static void stm_send(void *addr, const void *data, u32 size, u8 write_bytes)
+static void stm_send(void __iomem *addr, const void *data,
+		     u32 size, u8 write_bytes)
 {
 	u8 paload[8];
 
@@ -414,7 +415,7 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
 				  unsigned int size,
 				  const unsigned char *payload)
 {
-	unsigned long ch_addr;
+	void __iomem *ch_addr;
 	struct stm_drvdata *drvdata = container_of(stm_data,
 						   struct stm_drvdata, stm);
 
@@ -424,7 +425,7 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
 	if (channel >= drvdata->numsp)
 		return -EINVAL;
 
-	ch_addr = (unsigned long)stm_channel_addr(drvdata, channel);
+	ch_addr = stm_channel_addr(drvdata, channel);
 
 	flags = (flags == STP_PACKET_TIMESTAMPED) ? STM_FLAG_TIMESTAMPED : 0;
 	flags |= test_bit(channel, drvdata->chs.guaranteed) ?
@@ -437,20 +438,20 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
 
 	switch (packet) {
 	case STP_PACKET_FLAG:
-		ch_addr |= stm_channel_off(STM_PKT_TYPE_FLAG, flags);
+		ch_addr += stm_channel_off(STM_PKT_TYPE_FLAG, flags);
 
 		/*
 		 * The generic STM core sets a size of '0' on flag packets.
 		 * As such send a flag packet of size '1' and tell the
 		 * core we did so.
 		 */
-		stm_send((void *)ch_addr, payload, 1, drvdata->write_bytes);
+		stm_send(ch_addr, payload, 1, drvdata->write_bytes);
 		size = 1;
 		break;
 
 	case STP_PACKET_DATA:
-		ch_addr |= stm_channel_off(STM_PKT_TYPE_DATA, flags);
-		stm_send((void *)ch_addr, payload, size,
+		ch_addr += stm_channel_off(STM_PKT_TYPE_DATA, flags);
+		stm_send(ch_addr, payload, size,
 				drvdata->write_bytes);
 		break;
 
@@ -635,21 +636,21 @@ static ssize_t traceid_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(traceid);
 
-#define coresight_stm_simple_func(name, offset)	\
-	coresight_simple_func(struct stm_drvdata, NULL, name, offset)
-
-coresight_stm_simple_func(tcsr, STMTCSR);
-coresight_stm_simple_func(tsfreqr, STMTSFREQR);
-coresight_stm_simple_func(syncr, STMSYNCR);
-coresight_stm_simple_func(sper, STMSPER);
-coresight_stm_simple_func(spter, STMSPTER);
-coresight_stm_simple_func(privmaskr, STMPRIVMASKR);
-coresight_stm_simple_func(spscr, STMSPSCR);
-coresight_stm_simple_func(spmscr, STMSPMSCR);
-coresight_stm_simple_func(spfeat1r, STMSPFEAT1R);
-coresight_stm_simple_func(spfeat2r, STMSPFEAT2R);
-coresight_stm_simple_func(spfeat3r, STMSPFEAT3R);
-coresight_stm_simple_func(devid, CORESIGHT_DEVID);
+#define coresight_stm_reg(name, offset)	\
+	coresight_simple_reg32(struct stm_drvdata, name, offset)
+
+coresight_stm_reg(tcsr, STMTCSR);
+coresight_stm_reg(tsfreqr, STMTSFREQR);
+coresight_stm_reg(syncr, STMSYNCR);
+coresight_stm_reg(sper, STMSPER);
+coresight_stm_reg(spter, STMSPTER);
+coresight_stm_reg(privmaskr, STMPRIVMASKR);
+coresight_stm_reg(spscr, STMSPSCR);
+coresight_stm_reg(spmscr, STMSPMSCR);
+coresight_stm_reg(spfeat1r, STMSPFEAT1R);
+coresight_stm_reg(spfeat2r, STMSPFEAT2R);
+coresight_stm_reg(spfeat3r, STMSPFEAT3R);
+coresight_stm_reg(devid, CORESIGHT_DEVID);
 
 static struct attribute *coresight_stm_attrs[] = {
 	&dev_attr_hwevent_enable.attr,
@@ -914,7 +915,7 @@ static const struct dev_pm_ops stm_dev_pm_ops = {
 	SET_RUNTIME_PM_OPS(stm_runtime_suspend, stm_runtime_resume, NULL)
 };
 
-static struct amba_id stm_ids[] = {
+static const struct amba_id stm_ids[] = {
 	{
 		.id     = 0x0003b962,
 		.mask   = 0x0003ffff,
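
The stm_generic_packet()/stm_send() changes above replace the integer channel address with a void __iomem * cursor, which also turns the OR-based offset math into plain pointer addition: "base | off" only equals "base + off" while the base happens to be aligned well beyond the offset, whereas the addition is always correct and lets sparse check the address space. A minimal sketch of the equivalent arithmetic (helper name and channel layout hypothetical, not part of this patch):

	/* one fixed-size MMIO window per stimulus channel */
	static void __iomem *channel_addr(void __iomem *chs_base,
					  unsigned int channel,
					  unsigned long bytes_per_channel,
					  unsigned long pkt_off)
	{
		return chs_base + channel * bytes_per_channel + pkt_off;
	}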

+ 35 - 7
drivers/hwtracing/coresight/coresight-tmc-etf.c

@@ -43,17 +43,34 @@ static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
 
 static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
 {
+	bool lost = false;
 	char *bufp;
-	u32 read_data;
+	const u32 *barrier;
+	u32 read_data, status;
 	int i;
 
+	/*
+	 * Get a hold of the status register and see if a wrap around
+	 * has occurred.
+	 */
+	status = readl_relaxed(drvdata->base + TMC_STS);
+	if (status & TMC_STS_FULL)
+		lost = true;
+
 	bufp = drvdata->buf;
 	drvdata->len = 0;
+	barrier = barrier_pkt;
 	while (1) {
 		for (i = 0; i < drvdata->memwidth; i++) {
 			read_data = readl_relaxed(drvdata->base + TMC_RRD);
 			if (read_data == 0xFFFFFFFF)
 				return;
+
+			if (lost && *barrier) {
+				read_data = *barrier;
+				barrier++;
+			}
+
 			memcpy(bufp, &read_data, 4);
 			bufp += 4;
 			drvdata->len += 4;
@@ -369,9 +386,11 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
 				  struct perf_output_handle *handle,
 				  void *sink_config)
 {
+	bool lost = false;
 	int i, cur;
+	const u32 *barrier;
 	u32 *buf_ptr;
-	u32 read_ptr, write_ptr;
+	u64 read_ptr, write_ptr;
 	u32 status, to_read;
 	unsigned long offset;
 	struct cs_buffers *buf = sink_config;
@@ -388,8 +407,8 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
 
 	tmc_flush_and_stop(drvdata);
 
-	read_ptr = readl_relaxed(drvdata->base + TMC_RRP);
-	write_ptr = readl_relaxed(drvdata->base + TMC_RWP);
+	read_ptr = tmc_read_rrp(drvdata);
+	write_ptr = tmc_read_rwp(drvdata);
 
 	/*
 	 * Get a hold of the status register and see if a wrap around
@@ -397,7 +416,7 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
 	 */
 	status = readl_relaxed(drvdata->base + TMC_STS);
 	if (status & TMC_STS_FULL) {
-		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+		lost = true;
 		to_read = drvdata->size;
 	} else {
 		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
@@ -441,18 +460,27 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
 		if (read_ptr > (drvdata->size - 1))
 			read_ptr -= drvdata->size;
 		/* Tell the HW */
-		writel_relaxed(read_ptr, drvdata->base + TMC_RRP);
-		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+		tmc_write_rrp(drvdata, read_ptr);
+		lost = true;
 	}
 
+	if (lost)
+		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+
 	cur = buf->cur;
 	offset = buf->offset;
+	barrier = barrier_pkt;
 
 	/* for every byte to read */
 	for (i = 0; i < to_read; i += 4) {
 		buf_ptr = buf->data_pages[cur] + offset;
 		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);
 
+		if (lost && *barrier) {
+			*buf_ptr = *barrier;
+			barrier++;
+		}
+
 		offset += 4;
 		if (offset >= PAGE_SIZE) {
 			offset = 0;
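
For reference, CIRC_CNT() used by tmc_update_etf_buffer() above comes from <linux/circ_buf.h> and computes ((head) - (tail)) & ((size) - 1), so the buffer size must be a power of two. A small worked example (values hypothetical):

	#include <linux/circ_buf.h>

	/*
	 * With a 4 KiB FIFO, write_ptr (head) = 0x120 and
	 * read_ptr (tail) = 0xf80:
	 * (0x120 - 0xf80) & 0xfff = 0x1a0 bytes ready to read,
	 * i.e. the count wraps correctly around the end of the buffer.
	 */
	static unsigned long etf_bytes_ready(unsigned long write_ptr,
					     unsigned long read_ptr,
					     unsigned long size)
	{
		return CIRC_CNT(write_ptr, read_ptr, size);
	}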

+ 37 - 12
drivers/hwtracing/coresight/coresight-tmc-etr.c

@@ -22,7 +22,7 @@
 
 static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
 {
-	u32 axictl;
+	u32 axictl, sts;
 
 	/* Zero out the memory to help with debug */
 	memset(drvdata->vaddr, 0, drvdata->size);
@@ -36,17 +36,29 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
 	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
 
 	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
-	axictl |= TMC_AXICTL_WR_BURST_16;
-	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
-	axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
-	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
-	axictl = (axictl &
-		  ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
-		  TMC_AXICTL_PROT_CTL_B1;
+	axictl &= ~TMC_AXICTL_CLEAR_MASK;
+	axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
+	axictl |= TMC_AXICTL_AXCACHE_OS;
+
+	if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
+		axictl &= ~TMC_AXICTL_ARCACHE_MASK;
+		axictl |= TMC_AXICTL_ARCACHE_OS;
+	}
+
 	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+	tmc_write_dba(drvdata, drvdata->paddr);
+	/*
+	 * If the TMC pointers must be programmed before the session,
+	 * we have to set it properly (i.e, RRP/RWP to base address and
+	 * STS to "not full").
+	 */
+	if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
+		tmc_write_rrp(drvdata, drvdata->paddr);
+		tmc_write_rwp(drvdata, drvdata->paddr);
+		sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
+		writel_relaxed(sts, drvdata->base + TMC_STS);
+	}
 
-	writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
-	writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
 	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
 		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
 		       TMC_FFCR_TRIGON_TRIGIN,
@@ -59,9 +71,12 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
 
 static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
 {
-	u32 rwp, val;
+	const u32 *barrier;
+	u32 val;
+	u32 *temp;
+	u64 rwp;
 
-	rwp = readl_relaxed(drvdata->base + TMC_RWP);
+	rwp = tmc_read_rwp(drvdata);
 	val = readl_relaxed(drvdata->base + TMC_STS);
 
 	/*
@@ -71,6 +86,16 @@ static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
 	if (val & TMC_STS_FULL) {
 		drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
 		drvdata->len = drvdata->size;
+
+		barrier = barrier_pkt;
+		temp = (u32 *)drvdata->buf;
+
+		while (*barrier) {
+			*temp = *barrier;
+			temp++;
+			barrier++;
+		}
+
 	} else {
 		drvdata->buf = drvdata->vaddr;
 		drvdata->len = rwp - drvdata->paddr;

+ 85 - 23
drivers/hwtracing/coresight/coresight-tmc.c

@@ -217,20 +217,24 @@ static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
 	return memwidth;
 }
 
-#define coresight_tmc_simple_func(name, offset)			\
-	coresight_simple_func(struct tmc_drvdata, NULL, name, offset)
-
-coresight_tmc_simple_func(rsz, TMC_RSZ);
-coresight_tmc_simple_func(sts, TMC_STS);
-coresight_tmc_simple_func(rrp, TMC_RRP);
-coresight_tmc_simple_func(rwp, TMC_RWP);
-coresight_tmc_simple_func(trg, TMC_TRG);
-coresight_tmc_simple_func(ctl, TMC_CTL);
-coresight_tmc_simple_func(ffsr, TMC_FFSR);
-coresight_tmc_simple_func(ffcr, TMC_FFCR);
-coresight_tmc_simple_func(mode, TMC_MODE);
-coresight_tmc_simple_func(pscr, TMC_PSCR);
-coresight_tmc_simple_func(devid, CORESIGHT_DEVID);
+#define coresight_tmc_reg(name, offset)			\
+	coresight_simple_reg32(struct tmc_drvdata, name, offset)
+#define coresight_tmc_reg64(name, lo_off, hi_off)	\
+	coresight_simple_reg64(struct tmc_drvdata, name, lo_off, hi_off)
+
+coresight_tmc_reg(rsz, TMC_RSZ);
+coresight_tmc_reg(sts, TMC_STS);
+coresight_tmc_reg(trg, TMC_TRG);
+coresight_tmc_reg(ctl, TMC_CTL);
+coresight_tmc_reg(ffsr, TMC_FFSR);
+coresight_tmc_reg(ffcr, TMC_FFCR);
+coresight_tmc_reg(mode, TMC_MODE);
+coresight_tmc_reg(pscr, TMC_PSCR);
+coresight_tmc_reg(axictl, TMC_AXICTL);
+coresight_tmc_reg(devid, CORESIGHT_DEVID);
+coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI);
+coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI);
+coresight_tmc_reg64(dba, TMC_DBALO, TMC_DBAHI);
 
 static struct attribute *coresight_tmc_mgmt_attrs[] = {
 	&dev_attr_rsz.attr,
@@ -244,6 +248,8 @@ static struct attribute *coresight_tmc_mgmt_attrs[] = {
 	&dev_attr_mode.attr,
 	&dev_attr_pscr.attr,
 	&dev_attr_devid.attr,
+	&dev_attr_dba.attr,
+	&dev_attr_axictl.attr,
 	NULL,
 };
 
@@ -293,6 +299,42 @@ const struct attribute_group *coresight_tmc_groups[] = {
 	NULL,
 };
 
+/* Detect and initialise the capabilities of a TMC ETR */
+static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
+			     u32 devid, void *dev_caps)
+{
+	u32 dma_mask = 0;
+
+	/* Set the unadvertised capabilities */
+	tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
+
+	if (!(devid & TMC_DEVID_NOSCAT))
+		tmc_etr_set_cap(drvdata, TMC_ETR_SG);
+
+	/* Check if the AXI address width is available */
+	if (devid & TMC_DEVID_AXIAW_VALID)
+		dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
+				TMC_DEVID_AXIAW_MASK);
+
+	/*
+	 * Unless specified in the device configuration, ETR uses a 40-bit
+	 * AXI master in place of the embedded SRAM of ETB/ETF.
+	 */
+	switch (dma_mask) {
+	case 32:
+	case 40:
+	case 44:
+	case 48:
+	case 52:
+		dev_info(drvdata->dev, "Detected dma mask %dbits\n", dma_mask);
+		break;
+	default:
+		dma_mask = 40;
+	}
+
+	return dma_set_mask_and_coherent(drvdata->dev, DMA_BIT_MASK(dma_mask));
+}
+
 static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
 {
 	int ret = 0;
@@ -354,25 +396,29 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
 	desc.dev = dev;
 	desc.groups = coresight_tmc_groups;
 
-	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+	switch (drvdata->config_type) {
+	case TMC_CONFIG_TYPE_ETB:
 		desc.type = CORESIGHT_DEV_TYPE_SINK;
 		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
 		desc.ops = &tmc_etb_cs_ops;
-	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+		break;
+	case TMC_CONFIG_TYPE_ETR:
 		desc.type = CORESIGHT_DEV_TYPE_SINK;
 		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
 		desc.ops = &tmc_etr_cs_ops;
-		/*
-		 * ETR configuration uses a 40-bit AXI master in place of
-		 * the embedded SRAM of ETB/ETF.
-		 */
-		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+		ret = tmc_etr_setup_caps(drvdata, devid, id->data);
 		if (ret)
 			goto out;
-	} else {
+		break;
+	case TMC_CONFIG_TYPE_ETF:
 		desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
 		desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
 		desc.ops = &tmc_etf_cs_ops;
+		break;
+	default:
+		pr_err("%s: Unsupported TMC config\n", pdata->name);
+		ret = -EINVAL;
+		goto out;
 	}
 
 	drvdata->csdev = coresight_register(&desc);
@@ -391,11 +437,27 @@ out:
 	return ret;
 }
 
-static struct amba_id tmc_ids[] = {
+static const struct amba_id tmc_ids[] = {
 	{
 		.id     = 0x0003b961,
 		.mask   = 0x0003ffff,
 	},
+	{
+		/* Coresight SoC 600 TMC-ETR/ETS */
+		.id	= 0x000bb9e8,
+		.mask	= 0x000fffff,
+		.data	= (void *)(unsigned long)CORESIGHT_SOC_600_ETR_CAPS,
+	},
+	{
+		/* Coresight SoC 600 TMC-ETB */
+		.id	= 0x000bb9e9,
+		.mask	= 0x000fffff,
+	},
+	{
+		/* Coresight SoC 600 TMC-ETF */
+		.id	= 0x000bb9ea,
+		.mask	= 0x000fffff,
+	},
 	{ 0, 0},
 };
 
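A worked example for the DEVID decode in tmc_etr_setup_caps() above (the TMC_DEVID_* constants are defined in coresight-tmc.h below): a DEVID with bit 16 (TMC_DEVID_AXIAW_VALID) set and the value 0x28 in bits [23:17] advertises a 40-bit AXI address width, so the driver ends up calling dma_set_mask_and_coherent() with DMA_BIT_MASK(40). A minimal restatement of just the decode:

	static unsigned int tmc_etr_axi_width(u32 devid)
	{
		if (!(devid & TMC_DEVID_AXIAW_VALID))
			return 0;	/* not advertised; caller defaults to 40 */

		return (devid >> TMC_DEVID_AXIAW_SHIFT) & TMC_DEVID_AXIAW_MASK;
	}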

+ 84 - 1
drivers/hwtracing/coresight/coresight-tmc.h

@@ -54,11 +54,32 @@
 #define TMC_STS_TMCREADY_BIT	2
 #define TMC_STS_FULL		BIT(0)
 #define TMC_STS_TRIGGERED	BIT(1)
-/* TMC_AXICTL - 0x110 */
+/*
+ * TMC_AXICTL - 0x110
+ *
+ * TMC AXICTL format for SoC-400
+ *	Bits [0-1]	: ProtCtrlBit0-1
+ *	Bits [2-5]	: CacheCtrlBits 0-3 (AXCACHE)
+ *	Bit  6		: Reserved
+ *	Bit  7		: ScatterGatherMode
+ *	Bits [8-11]	: WrBurstLen
+ *	Bits [12-31]	: Reserved.
+ * TMC AXICTL format for SoC-600, as above except:
+ *	Bits [2-5]	: AXI WCACHE
+ *	Bits [16-19]	: AXI RCACHE
+ *	Bits [20-31]	: Reserved
+ */
+#define TMC_AXICTL_CLEAR_MASK 0xfbf
+#define TMC_AXICTL_ARCACHE_MASK (0xf << 16)
+
 #define TMC_AXICTL_PROT_CTL_B0	BIT(0)
 #define TMC_AXICTL_PROT_CTL_B1	BIT(1)
 #define TMC_AXICTL_SCT_GAT_MODE	BIT(7)
 #define TMC_AXICTL_WR_BURST_16	0xF00
+/* Write-back Read and Write-allocate */
+#define TMC_AXICTL_AXCACHE_OS	(0xf << 2)
+#define TMC_AXICTL_ARCACHE_OS	(0xf << 16)
+
 /* TMC_FFCR - 0x304 */
 #define TMC_FFCR_FLUSHMAN_BIT	6
 #define TMC_FFCR_EN_FMT		BIT(0)
@@ -69,6 +90,12 @@
 #define TMC_FFCR_STOP_ON_FLUSH	BIT(12)
 
 
+#define TMC_DEVID_NOSCAT	BIT(24)
+
+#define TMC_DEVID_AXIAW_VALID	BIT(16)
+#define TMC_DEVID_AXIAW_SHIFT	17
+#define TMC_DEVID_AXIAW_MASK	0x7f
+
 enum tmc_config_type {
 	TMC_CONFIG_TYPE_ETB,
 	TMC_CONFIG_TYPE_ETR,
@@ -88,6 +115,24 @@ enum tmc_mem_intf_width {
 	TMC_MEM_INTF_WIDTH_256BITS	= 8,
 };
 
+/* TMC ETR Capability bit definitions */
+#define TMC_ETR_SG			(0x1U << 0)
+/* ETR has separate read/write cache encodings */
+#define TMC_ETR_AXI_ARCACHE		(0x1U << 1)
+/*
+ * TMC_ETR_SAVE_RESTORE - Values of RRP/RWP/STS.Full are
+ * retained when TMC leaves Disabled state, allowing us to continue
+ * the tracing from a point where we stopped. This also implies that
+ * the RRP/RWP/STS.Full should always be programmed to the correct
+ * value. Unfortunately this is not advertised by the hardware,
+ * so we have to rely on PID of the IP to detect the functionality.
+ */
+#define TMC_ETR_SAVE_RESTORE		(0x1U << 2)
+
+/* Coresight SoC-600 TMC-ETR unadvertised capabilities */
+#define CORESIGHT_SOC_600_ETR_CAPS	\
+	(TMC_ETR_SAVE_RESTORE | TMC_ETR_AXI_ARCACHE)
+
 /**
  * struct tmc_drvdata - specifics associated to an TMC component
  * @base:	memory mapped base address for this component.
@@ -104,6 +149,8 @@ enum tmc_mem_intf_width {
 * @config_type: TMC variant, must be of type @tmc_config_type.
 * @memwidth:	width of the memory interface databus, in bytes.
 * @trigger_cntr: amount of words to store after a trigger.
+ * @etr_caps:	Bitmask of capabilities of the TMC ETR, inferred from the
+ *		device configuration register (DEVID)
 */
 struct tmc_drvdata {
 	void __iomem		*base;
@@ -121,6 +168,7 @@ struct tmc_drvdata {
 	enum tmc_config_type	config_type;
 	enum tmc_mem_intf_width	memwidth;
 	u32			trigger_cntr;
+	u32			etr_caps;
 };
 
 /* Generic functions */
@@ -139,4 +187,39 @@ extern const struct coresight_ops tmc_etf_cs_ops;
 int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
 int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
 extern const struct coresight_ops tmc_etr_cs_ops;
+
+
+#define TMC_REG_PAIR(name, lo_off, hi_off)				\
+static inline u64							\
+tmc_read_##name(struct tmc_drvdata *drvdata)				\
+{									\
+	return coresight_read_reg_pair(drvdata->base, lo_off, hi_off);	\
+}									\
+static inline void							\
+tmc_write_##name(struct tmc_drvdata *drvdata, u64 val)			\
+{									\
+	coresight_write_reg_pair(drvdata->base, val, lo_off, hi_off);	\
+}
+
+TMC_REG_PAIR(rrp, TMC_RRP, TMC_RRPHI)
+TMC_REG_PAIR(rwp, TMC_RWP, TMC_RWPHI)
+TMC_REG_PAIR(dba, TMC_DBALO, TMC_DBAHI)
+
+/* Initialise the caps from unadvertised static capabilities of the device */
+static inline void tmc_etr_init_caps(struct tmc_drvdata *drvdata, u32 dev_caps)
+{
+	WARN_ON(drvdata->etr_caps);
+	drvdata->etr_caps = dev_caps;
+}
+
+static inline void tmc_etr_set_cap(struct tmc_drvdata *drvdata, u32 cap)
+{
+	drvdata->etr_caps |= cap;
+}
+
+static inline bool tmc_etr_has_cap(struct tmc_drvdata *drvdata, u32 cap)
+{
+	return !!(drvdata->etr_caps & cap);
+}
+
 #endif
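
For illustration, TMC_REG_PAIR(rrp, TMC_RRP, TMC_RRPHI) above generates tmc_read_rrp() and tmc_write_rrp(). Assuming coresight_read_reg_pair()/coresight_write_reg_pair() do nothing more than combine and split a 64-bit value across the lo/hi 32-bit registers (a sketch, not the actual helpers), the read accessor expands to roughly:

	static inline u64 tmc_read_rrp_sketch(struct tmc_drvdata *drvdata)
	{
		u64 val;

		/* low word first, then the high word shifted into place */
		val  = readl_relaxed(drvdata->base + TMC_RRP);
		val |= (u64)readl_relaxed(drvdata->base + TMC_RRPHI) << 32;
		return val;
	}

On configurations without a HI register the real helper would skip the second read; this sketch ignores that case.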

+ 6 - 1
drivers/hwtracing/coresight/coresight-tpiu.c

@@ -192,7 +192,7 @@ static const struct dev_pm_ops tpiu_dev_pm_ops = {
 	SET_RUNTIME_PM_OPS(tpiu_runtime_suspend, tpiu_runtime_resume, NULL)
 };
 
-static struct amba_id tpiu_ids[] = {
+static const struct amba_id tpiu_ids[] = {
 	{
 		.id	= 0x0003b912,
 		.mask	= 0x0003ffff,
@@ -201,6 +201,11 @@ static struct amba_id tpiu_ids[] = {
 		.id	= 0x0004b912,
 		.mask	= 0x0007ffff,
 	},
+	{
+		/* Coresight SoC-600 */
+		.id	= 0x000bb9e7,
+		.mask	= 0x000fffff,
+	},
 	{ 0, 0},
 };
 

+ 8 - 0
drivers/hwtracing/coresight/coresight.c

@@ -53,6 +53,14 @@ static DEFINE_PER_CPU(struct list_head *, tracer_path);
  */
 static struct list_head *stm_path;
 
+/*
+ * When losing synchronisation a new barrier packet needs to be inserted at the
+ * beginning of the data collected in a buffer.  That way the decoder knows that
+ * it needs to look for another sync sequence.
+ */
+const u32 barrier_pkt[5] = {0x7fffffff, 0x7fffffff,
+			    0x7fffffff, 0x7fffffff, 0x0};
+
 static int coresight_id_match(struct device *dev, void *data)
 {
 	int trace_id, i_trace_id;
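
A sketch of how the sink drivers consume barrier_pkt: when the trace buffer wrapped (data was lost), the first words copied out are overwritten with the barrier sequence so that the decoder discards stale state and hunts for a new synchronisation packet. The table is zero-terminated, which is what the "*barrier" tests in the ETB/ETF/ETR dump loops above rely on:

	static void copy_out_with_barrier(u32 *dst, const u32 *src,
					  int words, bool lost)
	{
		const u32 *barrier = barrier_pkt;
		int i;

		for (i = 0; i < words; i++) {
			u32 val = src[i];

			/* replace the first few words until the table ends */
			if (lost && *barrier)
				val = *barrier++;
			dst[i] = val;
		}
	}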

+ 252 - 107
drivers/hwtracing/intel_th/core.c

@@ -101,17 +101,53 @@ out_pm:
 	return ret;
 }
 
+static void intel_th_device_remove(struct intel_th_device *thdev);
+
 static int intel_th_remove(struct device *dev)
 {
 	struct intel_th_driver *thdrv = to_intel_th_driver(dev->driver);
 	struct intel_th_device *thdev = to_intel_th_device(dev);
-	struct intel_th_device *hub = to_intel_th_device(dev->parent);
+	struct intel_th_device *hub = to_intel_th_hub(thdev);
 	int err;
 
 	if (thdev->type == INTEL_TH_SWITCH) {
+		struct intel_th *th = to_intel_th(hub);
+		int i, lowest;
+
+		/* disconnect outputs */
 		err = device_for_each_child(dev, thdev, intel_th_child_remove);
 		if (err)
 			return err;
+
+		/*
+		 * Remove outputs, that is, hub's children: they are created
+		 * at hub's probe time by having the hub call
+		 * intel_th_output_enable() for each of them.
+		 */
+		for (i = 0, lowest = -1; i < th->num_thdevs; i++) {
+			/*
+			 * Move the non-output devices from higher up the
+			 * th->thdev[] array to lower positions to maintain
+			 * a contiguous array.
+			 */
+			if (th->thdev[i]->type != INTEL_TH_OUTPUT) {
+				if (lowest >= 0) {
+					th->thdev[lowest] = th->thdev[i];
+					th->thdev[i] = NULL;
+					++lowest;
+				}
+
+				continue;
+			}
+
+			if (lowest == -1)
+				lowest = i;
+
+			intel_th_device_remove(th->thdev[i]);
+			th->thdev[i] = NULL;
+		}
+
+		th->num_thdevs = lowest;
 	}
 
 	if (thdrv->attr_group)
@@ -156,21 +192,6 @@ static struct device_type intel_th_source_device_type = {
 	.release	= intel_th_device_release,
 };
 
-static struct intel_th *to_intel_th(struct intel_th_device *thdev)
-{
-	/*
-	 * subdevice tree is flat: if this one is not a switch, its
-	 * parent must be
-	 */
-	if (thdev->type != INTEL_TH_SWITCH)
-		thdev = to_intel_th_hub(thdev);
-
-	if (WARN_ON_ONCE(!thdev || thdev->type != INTEL_TH_SWITCH))
-		return NULL;
-
-	return dev_get_drvdata(thdev->dev.parent);
-}
-
 static char *intel_th_output_devnode(struct device *dev, umode_t *mode,
 				     kuid_t *uid, kgid_t *gid)
 {
@@ -205,6 +226,7 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
 {
 	struct intel_th_driver *thdrv =
 		to_intel_th_driver_or_null(thdev->dev.driver);
+	struct intel_th *th = to_intel_th(thdev);
 	int ret = 0;
 
 	if (!thdrv)
@@ -215,15 +237,28 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
 
 	pm_runtime_get_sync(&thdev->dev);
 
+	if (th->activate)
+		ret = th->activate(th);
+	if (ret)
+		goto fail_put;
+
 	if (thdrv->activate)
 		ret = thdrv->activate(thdev);
 	else
 		intel_th_trace_enable(thdev);
 
-	if (ret) {
-		pm_runtime_put(&thdev->dev);
-		module_put(thdrv->driver.owner);
-	}
+	if (ret)
+		goto fail_deactivate;
+
+	return 0;
+
+fail_deactivate:
+	if (th->deactivate)
+		th->deactivate(th);
+
+fail_put:
+	pm_runtime_put(&thdev->dev);
+	module_put(thdrv->driver.owner);
 
 	return ret;
 }
@@ -232,6 +267,7 @@ static void intel_th_output_deactivate(struct intel_th_device *thdev)
 {
 	struct intel_th_driver *thdrv =
 		to_intel_th_driver_or_null(thdev->dev.driver);
+	struct intel_th *th = to_intel_th(thdev);
 
 	if (!thdrv)
 		return;
@@ -241,6 +277,9 @@ static void intel_th_output_deactivate(struct intel_th_device *thdev)
 	else
 		intel_th_trace_disable(thdev);
 
+	if (th->deactivate)
+		th->deactivate(th);
+
 	pm_runtime_put(&thdev->dev);
 	module_put(thdrv->driver.owner);
 }
@@ -326,10 +365,10 @@ intel_th_device_alloc(struct intel_th *th, unsigned int type, const char *name,
 	struct device *parent;
 	struct intel_th_device *thdev;
 
-	if (type == INTEL_TH_SWITCH)
-		parent = th->dev;
-	else
+	if (type == INTEL_TH_OUTPUT)
 		parent = &th->hub->dev;
+	else
+		parent = th->dev;
 
 	thdev = kzalloc(sizeof(*thdev) + strlen(name) + 1, GFP_KERNEL);
 	if (!thdev)
@@ -392,13 +431,14 @@ static const struct intel_th_subdevice {
 	unsigned		otype;
 	unsigned		scrpd;
 	int			id;
-} intel_th_subdevices[TH_SUBDEVICE_MAX] = {
+} intel_th_subdevices[] = {
 	{
 		.nres	= 1,
 		.res	= {
 			{
+				/* Handle TSCU from GTH driver */
 				.start	= REG_GTH_OFFSET,
-				.end	= REG_GTH_OFFSET + REG_GTH_LENGTH - 1,
+				.end	= REG_TSCU_OFFSET + REG_TSCU_LENGTH - 1,
 				.flags	= IORESOURCE_MEM,
 			},
 		},
@@ -479,6 +519,21 @@ static const struct intel_th_subdevice {
 		.otype	= GTH_PTI,
 		.scrpd	= SCRPD_PTI_IS_PRIM_DEST,
 	},
+	{
+		.nres	= 1,
+		.res	= {
+			{
+				.start	= REG_PTI_OFFSET,
+				.end	= REG_PTI_OFFSET + REG_PTI_LENGTH - 1,
+				.flags	= IORESOURCE_MEM,
+			},
+		},
+		.id	= -1,
+		.name	= "lpp",
+		.type	= INTEL_TH_OUTPUT,
+		.otype	= GTH_LPP,
+		.scrpd	= SCRPD_PTI_IS_PRIM_DEST,
+	},
 	{
 		.nres	= 1,
 		.res	= {
@@ -526,98 +581,182 @@ static inline void intel_th_request_hub_module_flush(struct intel_th *th)
 }
 #endif /* CONFIG_MODULES */
 
-static int intel_th_populate(struct intel_th *th, struct resource *devres,
-			     unsigned int ndevres, int irq)
+static struct intel_th_device *
+intel_th_subdevice_alloc(struct intel_th *th,
+			 const struct intel_th_subdevice *subdev)
 {
+	struct intel_th_device *thdev;
 	struct resource res[3];
 	unsigned int req = 0;
-	int src, dst, err;
+	int r, err;
 
-	/* create devices for each intel_th_subdevice */
-	for (src = 0, dst = 0; src < ARRAY_SIZE(intel_th_subdevices); src++) {
-		const struct intel_th_subdevice *subdev =
-			&intel_th_subdevices[src];
-		struct intel_th_device *thdev;
-		int r;
+	thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
+				      subdev->id);
+	if (!thdev)
+		return ERR_PTR(-ENOMEM);
 
-		/* only allow SOURCE and SWITCH devices in host mode */
-		if (host_mode && subdev->type == INTEL_TH_OUTPUT)
-			continue;
+	thdev->drvdata = th->drvdata;
+
+	memcpy(res, subdev->res,
+	       sizeof(struct resource) * subdev->nres);
+
+	for (r = 0; r < subdev->nres; r++) {
+		struct resource *devres = th->resource;
+		int bar = TH_MMIO_CONFIG;
+
+		/*
+		 * Take .end == 0 to mean 'take the whole bar',
+		 * .start then tells us which bar it is. Default to
+		 * TH_MMIO_CONFIG.
+		 */
+		if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
+			bar = res[r].start;
+			res[r].start = 0;
+			res[r].end = resource_size(&devres[bar]) - 1;
+		}
+
+		if (res[r].flags & IORESOURCE_MEM) {
+			res[r].start	+= devres[bar].start;
+			res[r].end	+= devres[bar].start;
 
-		thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
-					      subdev->id);
-		if (!thdev) {
-			err = -ENOMEM;
-			goto kill_subdevs;
+			dev_dbg(th->dev, "%s:%d @ %pR\n",
+				subdev->name, r, &res[r]);
+		} else if (res[r].flags & IORESOURCE_IRQ) {
+			res[r].start	= th->irq;
 		}
+	}
 
-		memcpy(res, subdev->res,
-		       sizeof(struct resource) * subdev->nres);
+	err = intel_th_device_add_resources(thdev, res, subdev->nres);
+	if (err) {
+		put_device(&thdev->dev);
+		goto fail_put_device;
+	}
 
-		for (r = 0; r < subdev->nres; r++) {
-			int bar = TH_MMIO_CONFIG;
+	if (subdev->type == INTEL_TH_OUTPUT) {
+		thdev->dev.devt = MKDEV(th->major, th->num_thdevs);
+		thdev->output.type = subdev->otype;
+		thdev->output.port = -1;
+		thdev->output.scratchpad = subdev->scrpd;
+	} else if (subdev->type == INTEL_TH_SWITCH) {
+		thdev->host_mode = host_mode;
+		th->hub = thdev;
+	}
 
-			/*
-			 * Take .end == 0 to mean 'take the whole bar',
-			 * .start then tells us which bar it is. Default to
-			 * TH_MMIO_CONFIG.
-			 */
-			if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
-				bar = res[r].start;
-				res[r].start = 0;
-				res[r].end = resource_size(&devres[bar]) - 1;
-			}
+	err = device_add(&thdev->dev);
+	if (err) {
+		put_device(&thdev->dev);
+		goto fail_free_res;
+	}
 
-			if (res[r].flags & IORESOURCE_MEM) {
-				res[r].start	+= devres[bar].start;
-				res[r].end	+= devres[bar].start;
+	/* need switch driver to be loaded to enumerate the rest */
+	if (subdev->type == INTEL_TH_SWITCH && !req) {
+		err = intel_th_request_hub_module(th);
+		if (!err)
+			req++;
+	}
 
-				dev_dbg(th->dev, "%s:%d @ %pR\n",
-					subdev->name, r, &res[r]);
-			} else if (res[r].flags & IORESOURCE_IRQ) {
-				res[r].start	= irq;
-			}
-		}
+	return thdev;
 
-		err = intel_th_device_add_resources(thdev, res, subdev->nres);
-		if (err) {
-			put_device(&thdev->dev);
-			goto kill_subdevs;
-		}
+fail_free_res:
+	kfree(thdev->resource);
 
-		if (subdev->type == INTEL_TH_OUTPUT) {
-			thdev->dev.devt = MKDEV(th->major, dst);
-			thdev->output.type = subdev->otype;
-			thdev->output.port = -1;
-			thdev->output.scratchpad = subdev->scrpd;
-		} else if (subdev->type == INTEL_TH_SWITCH) {
-			thdev->host_mode = host_mode;
-		}
+fail_put_device:
+	put_device(&thdev->dev);
+
+	return ERR_PTR(err);
+}
 
-		err = device_add(&thdev->dev);
-		if (err) {
-			put_device(&thdev->dev);
-			goto kill_subdevs;
+/**
+ * intel_th_output_enable() - find and enable a device for a given output type
+ * @th:		Intel TH instance
+ * @otype:	output type
+ *
+ * Go through the unallocated output devices, find the first one whose type
+ * matches @otype and instantiate it. These devices are removed when the hub
+ * device is removed, see intel_th_remove().
+ */
+int intel_th_output_enable(struct intel_th *th, unsigned int otype)
+{
+	struct intel_th_device *thdev;
+	int src = 0, dst = 0;
+
+	for (src = 0, dst = 0; dst <= th->num_thdevs; src++, dst++) {
+		for (; src < ARRAY_SIZE(intel_th_subdevices); src++) {
+			if (intel_th_subdevices[src].type != INTEL_TH_OUTPUT)
+				continue;
+
+			if (intel_th_subdevices[src].otype != otype)
+				continue;
+
+			break;
 		}
 
-		/* need switch driver to be loaded to enumerate the rest */
-		if (subdev->type == INTEL_TH_SWITCH && !req) {
-			th->hub = thdev;
-			err = intel_th_request_hub_module(th);
-			if (!err)
-				req++;
+		/* no unallocated matching subdevices */
+		if (src == ARRAY_SIZE(intel_th_subdevices))
+			return -ENODEV;
+
+		for (; dst < th->num_thdevs; dst++) {
+			if (th->thdev[dst]->type != INTEL_TH_OUTPUT)
+				continue;
+
+			if (th->thdev[dst]->output.type != otype)
+				continue;
+
+			break;
 		}
 
-		th->thdev[dst++] = thdev;
+		/*
+		 * intel_th_subdevices[src] matches our requirements and is
+		 * not matched in th::thdev[]
+		 */
+		if (dst == th->num_thdevs)
+			goto found;
 	}
 
+	return -ENODEV;
+
+found:
+	thdev = intel_th_subdevice_alloc(th, &intel_th_subdevices[src]);
+	if (IS_ERR(thdev))
+		return PTR_ERR(thdev);
+
+	th->thdev[th->num_thdevs++] = thdev;
+
 	return 0;
+}
+EXPORT_SYMBOL_GPL(intel_th_output_enable);
+
+static int intel_th_populate(struct intel_th *th)
+{
+	int src;
+
+	/* create devices for each intel_th_subdevice */
+	for (src = 0; src < ARRAY_SIZE(intel_th_subdevices); src++) {
+		const struct intel_th_subdevice *subdev =
+			&intel_th_subdevices[src];
+		struct intel_th_device *thdev;
+
+		/* only allow SOURCE and SWITCH devices in host mode */
+		if (host_mode && subdev->type == INTEL_TH_OUTPUT)
+			continue;
 
-kill_subdevs:
-	for (; dst >= 0; dst--)
-		intel_th_device_remove(th->thdev[dst]);
+		/*
+		 * don't enable port OUTPUTs in this path; SWITCH enables them
+		 * via intel_th_output_enable()
+		 */
+		if (subdev->type == INTEL_TH_OUTPUT &&
+		    subdev->otype != GTH_NONE)
+			continue;
+
+		thdev = intel_th_subdevice_alloc(th, subdev);
+		/* note: caller should free subdevices from th::thdev[] */
+		if (IS_ERR(thdev))
+			return PTR_ERR(thdev);
+
+		th->thdev[th->num_thdevs++] = thdev;
+	}
 
-	return err;
+	return 0;
 }
 
 static int match_devt(struct device *dev, void *data)
@@ -670,8 +809,8 @@ static const struct file_operations intel_th_output_fops = {
  * @irq:	irq number
  */
 struct intel_th *
-intel_th_alloc(struct device *dev, struct resource *devres,
-	       unsigned int ndevres, int irq)
+intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
+	       struct resource *devres, unsigned int ndevres, int irq)
 {
 	struct intel_th *th;
 	int err;
@@ -693,6 +832,11 @@ intel_th_alloc(struct device *dev, struct resource *devres,
 		goto err_ida;
 	}
 	th->dev = dev;
+	th->drvdata = drvdata;
+
+	th->resource = devres;
+	th->num_resources = ndevres;
+	th->irq = irq;
 
 	dev_set_drvdata(dev, th);
 
@@ -700,18 +844,15 @@ intel_th_alloc(struct device *dev, struct resource *devres,
 	pm_runtime_put(dev);
 	pm_runtime_allow(dev);
 
-	err = intel_th_populate(th, devres, ndevres, irq);
-	if (err)
-		goto err_chrdev;
+	err = intel_th_populate(th);
+	if (err) {
+		/* free the subdevices and undo everything */
+		intel_th_free(th);
+		return ERR_PTR(err);
+	}
 
 	return th;
 
-err_chrdev:
-	pm_runtime_forbid(dev);
-
-	__unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
-			    "intel_th/output");
-
 err_ida:
 	ida_simple_remove(&intel_th_ida, th->id);
 
@@ -727,11 +868,15 @@ void intel_th_free(struct intel_th *th)
 	int i;
 
 	intel_th_request_hub_module_flush(th);
-	for (i = 0; i < TH_SUBDEVICE_MAX; i++)
-		if (th->thdev[i] && th->thdev[i] != th->hub)
-			intel_th_device_remove(th->thdev[i]);
 
 	intel_th_device_remove(th->hub);
+	for (i = 0; i < th->num_thdevs; i++) {
+		if (th->thdev[i] != th->hub)
+			intel_th_device_remove(th->thdev[i]);
+		th->thdev[i] = NULL;
+	}
+
+	th->num_thdevs = 0;
 
 	pm_runtime_get_sync(th->dev);
 	pm_runtime_forbid(th->dev);
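
The resource fixup in intel_th_subdevice_alloc() above treats a zero .end as "the whole BAR", with .start naming the BAR index. A worked example with hypothetical addresses:

	/* declared in intel_th_subdevices[]: "all of BAR TH_MMIO_SW" */
	{
		.start	= TH_MMIO_SW,	/* BAR index, not an address */
		.end	= 0,
		.flags	= IORESOURCE_MEM,
	},

	/*
	 * with devres[TH_MMIO_SW] spanning 0xfe100000..0xfe1fffff
	 * (hypothetical), the fixup first rewrites the entry to
	 * 0x0..0xfffff and then offsets it by the BAR base, handing
	 * the subdevice 0xfe100000..0xfe1fffff, i.e. the whole BAR.
	 */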

+ 30 - 10
drivers/hwtracing/intel_th/gth.c

@@ -285,16 +285,16 @@ gth_output_parm_get(struct gth_device *gth, int port, unsigned int parm)
  */
 static int intel_th_gth_reset(struct gth_device *gth)
 {
-	u32 scratchpad;
+	u32 reg;
 	int port, i;
 
-	scratchpad = ioread32(gth->base + REG_GTH_SCRPD0);
-	if (scratchpad & SCRPD_DEBUGGER_IN_USE)
+	reg = ioread32(gth->base + REG_GTH_SCRPD0);
+	if (reg & SCRPD_DEBUGGER_IN_USE)
 		return -EBUSY;
 
 	/* Always save/restore STH and TU registers in S0ix entry/exit */
-	scratchpad |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED;
-	iowrite32(scratchpad, gth->base + REG_GTH_SCRPD0);
+	reg |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED;
+	iowrite32(reg, gth->base + REG_GTH_SCRPD0);
 
 	/* output ports */
 	for (port = 0; port < 8; port++) {
@@ -512,6 +512,15 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
 	iowrite32(reg, gth->base + REG_GTH_SCRPD0);
 }
 
+static void gth_tscu_resync(struct gth_device *gth)
+{
+	u32 reg;
+
+	reg = ioread32(gth->base + REG_TSCU_TSUCTRL);
+	reg &= ~TSUCTRL_CTCRESYNC;
+	iowrite32(reg, gth->base + REG_TSCU_TSUCTRL);
+}
+
 /**
  * intel_th_gth_enable() - enable tracing to an output device
  * @thdev:	GTH device
@@ -524,6 +533,7 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
 				struct intel_th_output *output)
 {
 	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
+	struct intel_th *th = to_intel_th(thdev);
 	u32 scr = 0xfc0000, scrpd;
 	int master;
 
@@ -539,6 +549,9 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
 	output->active = true;
 	spin_unlock(&gth->gth_lock);
 
+	if (INTEL_TH_CAP(th, tscu_enable))
+		gth_tscu_resync(gth);
+
 	scrpd = ioread32(gth->base + REG_GTH_SCRPD0);
 	scrpd |= output->scratchpad;
 	iowrite32(scrpd, gth->base + REG_GTH_SCRPD0);
@@ -639,6 +652,7 @@ intel_th_gth_set_output(struct intel_th_device *thdev, unsigned int master)
 static int intel_th_gth_probe(struct intel_th_device *thdev)
 {
 	struct device *dev = &thdev->dev;
+	struct intel_th *th = dev_get_drvdata(dev->parent);
 	struct gth_device *gth;
 	struct resource *res;
 	void __iomem *base;
@@ -660,6 +674,8 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
 	gth->base = base;
 	spin_lock_init(&gth->gth_lock);
 
+	dev_set_drvdata(dev, gth);
+
 	/*
 	 * Host mode can be signalled via SW means or via SCRPD_DEBUGGER_IN_USE
 	 * bit. Either way, don't reset HW in this case, and don't export any
@@ -667,7 +683,7 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
 	 * drivers to ports, see intel_th_gth_assign().
 	 */
 	if (thdev->host_mode)
-		goto done;
+		return 0;
 
 	ret = intel_th_gth_reset(gth);
 	if (ret) {
@@ -676,7 +692,7 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
 
 		thdev->host_mode = true;
 
-		goto done;
+		return 0;
 	}
 
 	for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++)
@@ -687,6 +703,13 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
 		gth->output[i].index = i;
 		gth->output[i].port_type =
 			gth_output_parm_get(gth, i, TH_OUTPUT_PARM(port));
+		if (gth->output[i].port_type == GTH_NONE)
+			continue;
+
+		ret = intel_th_output_enable(th, gth->output[i].port_type);
+		/* -ENODEV is ok, we just won't have that device enumerated */
+		if (ret && ret != -ENODEV)
+			return ret;
 	}
 
 	if (intel_th_output_attributes(gth) ||
@@ -698,9 +721,6 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
 		return -ENOMEM;
 	}
 
-done:
-	dev_set_drvdata(dev, gth);
-
 	return 0;
 }
 

+ 5 - 0
drivers/hwtracing/intel_th/gth.h

@@ -55,9 +55,14 @@ enum {
 	REG_GTH_SCRPD1		= 0xe4, /* ScratchPad[1] */
 	REG_GTH_SCRPD2		= 0xe8, /* ScratchPad[2] */
 	REG_GTH_SCRPD3		= 0xec, /* ScratchPad[3] */
+	REG_TSCU_TSUCTRL	= 0x2000, /* TSCU control register */
+	REG_TSCU_TSCUSTAT	= 0x2004, /* TSCU status register */
 };
 
 /* waiting for Pipeline Empty bit(s) to assert for GTH */
 #define GTH_PLE_WAITLOOP_DEPTH	10000
 
+#define TSUCTRL_CTCRESYNC	BIT(0)
+#define TSCUSTAT_CTCSYNCING	BIT(1)
+
 #endif /* __INTEL_TH_GTH_H__ */

+ 84 - 20
drivers/hwtracing/intel_th/intel_th.h

@@ -47,9 +47,20 @@ struct intel_th_output {
 	bool		active;
 };
 
+/**
+ * struct intel_th_drvdata - describes hardware capabilities and quirks
+ * @tscu_enable:	device needs SW to enable time stamping unit
+ */
+struct intel_th_drvdata {
+	unsigned int	tscu_enable        : 1;
+};
+
+#define INTEL_TH_CAP(_th, _cap) ((_th)->drvdata ? (_th)->drvdata->_cap : 0)
+
 /**
  * struct intel_th_device - device on the intel_th bus
  * @dev:		device
+ * @drvdata:		hardware capabilities/quirks
  * @resource:		array of resources available to this device
 * @num_resources:	number of resources in @resource array
 * @type:		INTEL_TH_{SOURCE,OUTPUT,SWITCH}
@@ -59,11 +70,12 @@ struct intel_th_output {
 * @name:		device name to match the driver
 */
 struct intel_th_device {
-	struct device	dev;
-	struct resource	*resource;
-	unsigned int	num_resources;
-	unsigned int	type;
-	int		id;
+	struct device		dev;
+	struct intel_th_drvdata *drvdata;
+	struct resource		*resource;
+	unsigned int		num_resources;
+	unsigned int		type;
+	int			id;
 
 	/* INTEL_TH_SWITCH specific */
 	bool			host_mode;
@@ -96,6 +108,17 @@ intel_th_device_get_resource(struct intel_th_device *thdev, unsigned int type,
 	return NULL;
 }
 
+/*
+ * GTH, output ports configuration
+ */
+enum {
+	GTH_NONE = 0,
+	GTH_MSU,	/* memory/usb */
+	GTH_CTP,	/* Common Trace Port */
+	GTH_LPP,	/* Low Power Path */
+	GTH_PTI,	/* MIPI-PTI */
+};
+
 /**
  * intel_th_output_assigned() - if an output device is assigned to a switch port
 * @thdev:	the output device
@@ -106,7 +129,8 @@ static inline bool
 intel_th_output_assigned(struct intel_th_device *thdev)
 {
 	return thdev->type == INTEL_TH_OUTPUT &&
-		thdev->output.port >= 0;
+		(thdev->output.port >= 0 ||
+		 thdev->output.type == GTH_NONE);
 }
 
 /**
@@ -161,8 +185,18 @@ struct intel_th_driver {
 #define to_intel_th_driver_or_null(_d)		\
 	((_d) ? to_intel_th_driver(_d) : NULL)
 
+ * Subdevice tree structure is as follows:
+ * + struct intel_th device (pci; dev_{get,set}_drvdata()
+ *   + struct intel_th_device INTEL_TH_SWITCH (GTH)
+ *     + struct intel_th_device INTEL_TH_OUTPUT (MSU, PTI)
+ *   + struct intel_th_device INTEL_TH_SOURCE (STH)
+ *
+ * In other words, INTEL_TH_OUTPUT devices are children of INTEL_TH_SWITCH;
+ * INTEL_TH_SWITCH and INTEL_TH_SOURCE are children of the intel_th device.
+ */
 static inline struct intel_th_device *
 static inline struct intel_th_device *
-to_intel_th_hub(struct intel_th_device *thdev)
+to_intel_th_parent(struct intel_th_device *thdev)
 {
 	struct device *parent = thdev->dev.parent;
 
@@ -172,9 +206,20 @@ to_intel_th_hub(struct intel_th_device *thdev)
 	return to_intel_th_device(parent);
 }
 
+static inline struct intel_th *to_intel_th(struct intel_th_device *thdev)
+{
+	if (thdev->type == INTEL_TH_OUTPUT)
+		thdev = to_intel_th_parent(thdev);
+
+	if (WARN_ON_ONCE(!thdev || thdev->type == INTEL_TH_OUTPUT))
+		return NULL;
+
+	return dev_get_drvdata(thdev->dev.parent);
+}
+
 struct intel_th *
-intel_th_alloc(struct device *dev, struct resource *devres,
-	       unsigned int ndevres, int irq);
+intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
+	       struct resource *devres, unsigned int ndevres, int irq);
 void intel_th_free(struct intel_th *th);
 
 int intel_th_driver_register(struct intel_th_driver *thdrv);
@@ -184,6 +229,7 @@ int intel_th_trace_enable(struct intel_th_device *thdev);
 int intel_th_trace_disable(struct intel_th_device *thdev);
 int intel_th_set_output(struct intel_th_device *thdev,
 			unsigned int master);
+int intel_th_output_enable(struct intel_th *th, unsigned int otype);
 
 enum {
 	TH_MMIO_CONFIG = 0,
@@ -191,8 +237,9 @@ enum {
 	TH_MMIO_END,
 };
 
-#define TH_SUBDEVICE_MAX	6
 #define TH_POSSIBLE_OUTPUTS	8
+/* Total number of possible subdevices: outputs + GTH + STH */
+#define TH_SUBDEVICE_MAX	(TH_POSSIBLE_OUTPUTS + 2)
 #define TH_CONFIGURABLE_MASTERS 256
 #define TH_MSC_MAX		2
 
@@ -201,6 +248,10 @@ enum {
 * @dev:	driver core's device
 * @thdev:	subdevices
 * @hub:	"switch" subdevice (GTH)
+ * @resource:	resources of the entire controller
+ * @num_thdevs:	number of devices in the @thdev array
+ * @num_resources:	number of resources in the @resource array
+ * @irq:	irq number
 * @id:		this Intel TH controller's device ID in the system
 * @major:	device node major for output devices
 */
@@ -209,6 +260,14 @@ struct intel_th {
 
 	struct intel_th_device	*thdev[TH_SUBDEVICE_MAX];
 	struct intel_th_device	*hub;
+	struct intel_th_drvdata	*drvdata;
+
+	struct resource		*resource;
+	int			(*activate)(struct intel_th *);
+	void			(*deactivate)(struct intel_th *);
+	unsigned int		num_thdevs;
+	unsigned int		num_resources;
+	int			irq;
 
 	int			id;
 	int			major;
@@ -220,6 +279,17 @@ struct intel_th {
 #endif
 };
 
+static inline struct intel_th_device *
+to_intel_th_hub(struct intel_th_device *thdev)
+{
+	if (thdev->type == INTEL_TH_SWITCH)
+		return thdev;
+	else if (thdev->type == INTEL_TH_OUTPUT)
+		return to_intel_th_parent(thdev);
+
+	return to_intel_th(thdev)->hub;
+}
+
 /*
 * Register windows
 */
@@ -228,6 +298,10 @@ enum {
 	REG_GTH_OFFSET		= 0x0000,
 	REG_GTH_LENGTH		= 0x2000,
 
+	/* Timestamp counter unit (TSCU) */
+	REG_TSCU_OFFSET		= 0x2000,
+	REG_TSCU_LENGTH		= 0x1000,
+
 	/* Software Trace Hub (STH) [0x4000..0x4fff] */
 	REG_STH_OFFSET		= 0x4000,
 	REG_STH_LENGTH		= 0x2000,
@@ -249,16 +323,6 @@ enum {
 	REG_DCIH_LENGTH		= REG_MSU_LENGTH,
 };
 
-/*
- * GTH, output ports configuration
- */
-enum {
-	GTH_NONE = 0,
-	GTH_MSU,	/* memory/usb */
-	GTH_CTP,	/* Common Trace Port */
-	GTH_PTI = 4,	/* MIPI-PTI */
-};
-
 /*
 * Scratchpad bits: tell firmware and external debuggers
 * what we are up to.

+ 6 - 6
drivers/hwtracing/intel_th/msu.c

@@ -709,17 +709,17 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
 	}
 
 	for (i = 0; i < nr_blocks; i++) {
-		win->block[i].bdesc = dma_alloc_coherent(msc_dev(msc), size,
-							 &win->block[i].addr,
-							 GFP_KERNEL);
+		win->block[i].bdesc =
+			dma_alloc_coherent(msc_dev(msc)->parent->parent, size,
+					   &win->block[i].addr, GFP_KERNEL);
+
+		if (!win->block[i].bdesc)
+			goto err_nomem;
 
 #ifdef CONFIG_X86
 		/* Set the page as uncached */
 		set_memory_uc((unsigned long)win->block[i].bdesc, 1);
 #endif
-
-		if (!win->block[i].bdesc)
-			goto err_nomem;
 	}
 
 	win->msc = msc;

+ 65 - 2
drivers/hwtracing/intel_th/pci.c

@@ -27,9 +27,53 @@
 
 #define BAR_MASK (BIT(TH_MMIO_CONFIG) | BIT(TH_MMIO_SW))
 
+#define PCI_REG_NPKDSC	0x80
+#define NPKDSC_TSACT	BIT(5)
+
+static int intel_th_pci_activate(struct intel_th *th)
+{
+	struct pci_dev *pdev = to_pci_dev(th->dev);
+	u32 npkdsc;
+	int err;
+
+	if (!INTEL_TH_CAP(th, tscu_enable))
+		return 0;
+
+	err = pci_read_config_dword(pdev, PCI_REG_NPKDSC, &npkdsc);
+	if (!err) {
+		npkdsc |= NPKDSC_TSACT;
+		err = pci_write_config_dword(pdev, PCI_REG_NPKDSC, npkdsc);
+	}
+
+	if (err)
+		dev_err(&pdev->dev, "failed to read NPKDSC register\n");
+
+	return err;
+}
+
+static void intel_th_pci_deactivate(struct intel_th *th)
+{
+	struct pci_dev *pdev = to_pci_dev(th->dev);
+	u32 npkdsc;
+	int err;
+
+	if (!INTEL_TH_CAP(th, tscu_enable))
+		return;
+
+	err = pci_read_config_dword(pdev, PCI_REG_NPKDSC, &npkdsc);
+	if (!err) {
+		npkdsc &= ~NPKDSC_TSACT;
+		err = pci_write_config_dword(pdev, PCI_REG_NPKDSC, npkdsc);
+	}
+
+	if (err)
+		dev_err(&pdev->dev, "failed to read NPKDSC register\n");
+}
+
 static int intel_th_pci_probe(struct pci_dev *pdev,
 			      const struct pci_device_id *id)
 {
+	struct intel_th_drvdata *drvdata = (void *)id->driver_data;
 	struct intel_th *th;
 	int err;
 
@@ -41,11 +85,16 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
 	if (err)
 		return err;
 
-	th = intel_th_alloc(&pdev->dev, pdev->resource,
+	th = intel_th_alloc(&pdev->dev, drvdata, pdev->resource,
 			    DEVICE_COUNT_RESOURCE, pdev->irq);
 	if (IS_ERR(th))
 		return PTR_ERR(th);
 
+	th->activate   = intel_th_pci_activate;
+	th->deactivate = intel_th_pci_deactivate;
+
+	pci_set_master(pdev);
+
 	return 0;
 }
 
@@ -56,6 +105,10 @@ static void intel_th_pci_remove(struct pci_dev *pdev)
 	intel_th_free(th);
 }
 
+static const struct intel_th_drvdata intel_th_2x = {
+	.tscu_enable	= 1,
+};
+
 static const struct pci_device_id intel_th_pci_id_table[] = {
 	{
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9d26),
@@ -93,7 +146,17 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 	{
 		/* Gemini Lake */
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
-		.driver_data = (kernel_ulong_t)0,
+		.driver_data = (kernel_ulong_t)&intel_th_2x,
+	},
+	{
+		/* Cannon Lake H */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa326),
+		.driver_data = (kernel_ulong_t)&intel_th_2x,
+	},
+	{
+		/* Cannon Lake LP */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
+		.driver_data = (kernel_ulong_t)&intel_th_2x,
 	},
 	{ 0 },
 };

+ 110 - 5
drivers/hwtracing/intel_th/pti.c

@@ -1,7 +1,7 @@
 /*
 /*
  * Intel(R) Trace Hub PTI output driver
  * Intel(R) Trace Hub PTI output driver
  *
  *
- * Copyright (C) 2014-2015 Intel Corporation.
+ * Copyright (C) 2014-2016 Intel Corporation.
  *
  *
  * This program is free software; you can redistribute it and/or modify it
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  * under the terms and conditions of the GNU General Public License,
@@ -34,6 +34,8 @@ struct pti_device {
 	unsigned int		freeclk;
 	unsigned int		freeclk;
 	unsigned int		clkdiv;
 	unsigned int		clkdiv;
 	unsigned int		patgen;
 	unsigned int		patgen;
+	unsigned int		lpp_dest_mask;
+	unsigned int		lpp_dest;
 };
 };
 
 
 /* map PTI widths to MODE settings of PTI_CTL register */
 /* map PTI widths to MODE settings of PTI_CTL register */
@@ -163,6 +165,7 @@ static int intel_th_pti_activate(struct intel_th_device *thdev)
 		ctl |= PTI_FCEN;
 		ctl |= PTI_FCEN;
 	ctl |= pti->mode << __ffs(PTI_MODE);
 	ctl |= pti->mode << __ffs(PTI_MODE);
 	ctl |= pti->clkdiv << __ffs(PTI_CLKDIV);
 	ctl |= pti->clkdiv << __ffs(PTI_CLKDIV);
+	ctl |= pti->lpp_dest << __ffs(LPP_DEST);
 
 
 	iowrite32(ctl, pti->base + REG_PTI_CTL);
 	iowrite32(ctl, pti->base + REG_PTI_CTL);
 
 
@@ -192,6 +195,15 @@ static void read_hw_config(struct pti_device *pti)
 		pti->mode = pti_width_mode(4);
 		pti->mode = pti_width_mode(4);
 	if (!pti->clkdiv)
 	if (!pti->clkdiv)
 		pti->clkdiv = 1;
 		pti->clkdiv = 1;
+
+	if (pti->thdev->output.type == GTH_LPP) {
+		if (ctl & LPP_PTIPRESENT)
+			pti->lpp_dest_mask |= LPP_DEST_PTI;
+		if (ctl & LPP_BSSBPRESENT)
+			pti->lpp_dest_mask |= LPP_DEST_EXI;
+		if (ctl & LPP_DEST)
+			pti->lpp_dest = 1;
+	}
 }
 }
 
 
 static int intel_th_pti_probe(struct intel_th_device *thdev)
 static int intel_th_pti_probe(struct intel_th_device *thdev)
@@ -239,10 +251,103 @@ static struct intel_th_driver intel_th_pti_driver = {
 	},
 	},
 };
 };
 
 
-module_driver(intel_th_pti_driver,
-	      intel_th_driver_register,
-	      intel_th_driver_unregister);
+static const char * const lpp_dest_str[] = { "pti", "exi" };
+
+static ssize_t lpp_dest_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct pti_device *pti = dev_get_drvdata(dev);
+	ssize_t ret = 0;
+	int i;
+
+	for (i = ARRAY_SIZE(lpp_dest_str) - 1; i >= 0; i--) {
+		const char *fmt = pti->lpp_dest == i ? "[%s] " : "%s ";
+
+		if (!(pti->lpp_dest_mask & BIT(i)))
+			continue;
+
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+				 fmt, lpp_dest_str[i]);
+	}
+
+	if (ret)
+		buf[ret - 1] = '\n';
+
+	return ret;
+}
+
+static ssize_t lpp_dest_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	struct pti_device *pti = dev_get_drvdata(dev);
+	ssize_t ret = -EINVAL;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(lpp_dest_str); i++)
+		if (sysfs_streq(buf, lpp_dest_str[i]))
+			break;
+
+	if (i < ARRAY_SIZE(lpp_dest_str) && pti->lpp_dest_mask & BIT(i)) {
+		pti->lpp_dest = i;
+		ret = size;
+	}
+
+	return ret;
+}
+
+static DEVICE_ATTR_RW(lpp_dest);
+
+static struct attribute *lpp_output_attrs[] = {
+	&dev_attr_mode.attr,
+	&dev_attr_freerunning_clock.attr,
+	&dev_attr_clock_divider.attr,
+	&dev_attr_lpp_dest.attr,
+	NULL,
+};
+
+static struct attribute_group lpp_output_group = {
+	.attrs	= lpp_output_attrs,
+};
+
+static struct intel_th_driver intel_th_lpp_driver = {
+	.probe		= intel_th_pti_probe,
+	.remove		= intel_th_pti_remove,
+	.activate	= intel_th_pti_activate,
+	.deactivate	= intel_th_pti_deactivate,
+	.attr_group	= &lpp_output_group,
+	.driver	= {
+		.name	= "lpp",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init intel_th_pti_lpp_init(void)
+{
+	int err;
+
+	err = intel_th_driver_register(&intel_th_pti_driver);
+	if (err)
+		return err;
+
+	err = intel_th_driver_register(&intel_th_lpp_driver);
+	if (err) {
+		intel_th_driver_unregister(&intel_th_pti_driver);
+		return err;
+	}
+
+	return 0;
+}
+
+module_init(intel_th_pti_lpp_init);
+
+static void __exit intel_th_pti_lpp_exit(void)
+{
+	intel_th_driver_unregister(&intel_th_pti_driver);
+	intel_th_driver_unregister(&intel_th_lpp_driver);
+}
+
+module_exit(intel_th_pti_lpp_exit);
 
 MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Intel(R) Trace Hub PTI output driver");
+MODULE_DESCRIPTION("Intel(R) Trace Hub PTI/LPP output driver");
 MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");

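The new lpp_dest attribute follows the common sysfs multiple-choice convention: a read lists the destinations the hardware reports as present, with the active one in brackets, and a write of one token ("pti" or "exi") selects it. A hedged userspace sketch; the exact sysfs path depends on how the trace hub enumerates on a given platform and is an assumption here:

#include <stdio.h>

/* Path is an assumption for illustration only. */
#define LPP_DEST_PATH "/sys/bus/intel_th/devices/0-lpp/lpp_dest"

int main(void)
{
	char buf[64] = "";
	FILE *f = fopen(LPP_DEST_PATH, "r");

	if (!f)
		return 1;
	fgets(buf, sizeof(buf), f);	/* e.g. "exi [pti]\n" */
	fclose(f);
	printf("available destinations: %s", buf);

	f = fopen(LPP_DEST_PATH, "w");
	if (!f)
		return 1;
	fputs("exi", f);		/* select the EXI destination */
	return fclose(f) ? 1 : 0;
}
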
+ 8 - 0
drivers/hwtracing/intel_th/pti.h

@@ -23,7 +23,15 @@ enum {
 #define PTI_EN		BIT(0)
 #define PTI_FCEN	BIT(1)
 #define PTI_MODE	0xf0
+#define LPP_PTIPRESENT	BIT(8)
+#define LPP_BSSBPRESENT	BIT(9)
 #define PTI_CLKDIV	0x000f0000
 #define PTI_PATGENMODE	0x00f00000
+#define LPP_DEST	BIT(25)
+#define LPP_BSSBACT	BIT(30)
+#define LPP_LPPBUSY	BIT(31)
+
+#define LPP_DEST_PTI	BIT(0)
+#define LPP_DEST_EXI	BIT(1)
 
 #endif /* __INTEL_TH_STH_H__ */

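These masks are consumed with the value << __ffs(mask) idiom seen in pti.c above: shifting by the position of the mask's lowest set bit drops a value into the right register field. A standalone sketch of the idiom, where my_ffs is a userspace stand-in for the kernel's __ffs and, like it, assumes a non-zero mask:

#include <stdio.h>

#define PTI_CLKDIV	0x000f0000u	/* field mask from pti.h */

/* Index of the lowest set bit; undefined for x == 0, as with __ffs(). */
static unsigned int my_ffs(unsigned int x)
{
	unsigned int n = 0;

	while (!(x & 1)) {
		x >>= 1;
		n++;
	}
	return n;
}

int main(void)
{
	unsigned int ctl = 0, clkdiv = 1;

	/* Drop clkdiv into the PTI_CLKDIV field of the control word. */
	ctl |= (clkdiv << my_ffs(PTI_CLKDIV)) & PTI_CLKDIV;
	printf("ctl = 0x%08x\n", ctl);	/* prints ctl = 0x00010000 */
	return 0;
}
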
+ 1 - 1
drivers/hwtracing/stm/core.c

@@ -566,7 +566,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
 	if (copy_from_user(&size, arg, sizeof(size)))
 		return -EFAULT;
 
-	if (size >= PATH_MAX + sizeof(*id))
+	if (size < sizeof(*id) || size >= PATH_MAX + sizeof(*id))
 		return -EINVAL;
 
 	/*

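The added lower bound matters because size is unsigned: a value smaller than sizeof(*id) would pass the old upper-bound check and then wrap around to a huge number once the header size is subtracted from it later. A minimal sketch of the hazard, with simplified types:

#include <stddef.h>
#include <stdio.h>

struct id_hdr { unsigned int a, b; };	/* stand-in for *id */

int main(void)
{
	size_t size = 4;	/* user-chosen, smaller than the header */

	/* passes the old check: 4 < PATH_MAX + sizeof(*id) */
	size_t payload = size - sizeof(struct id_hdr);

	/* unsigned arithmetic wraps instead of going negative */
	printf("payload = %zu\n", payload);
	return 0;
}
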
+ 15 - 0
drivers/mcb/mcb-lpc.c

@@ -114,6 +114,12 @@ static struct resource sc24_fpga_resource = {
 	.flags = IORESOURCE_MEM,
 };
 
+static struct resource sc31_fpga_resource = {
+	.start = 0xf000e000,
+	.end = 0xf000e000 + CHAM_HEADER_SIZE,
+	.flags = IORESOURCE_MEM,
+};
+
 static struct platform_driver mcb_lpc_driver = {
 	.driver		= {
 		.name = "mcb-lpc",
@@ -132,6 +138,15 @@ static const struct dmi_system_id mcb_lpc_dmi_table[] = {
 		.driver_data = (void *)&sc24_fpga_resource,
 		.callback = mcb_lpc_create_platform_device,
 	},
+	{
+		.ident = "SC31",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "MEN"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "14SC31"),
+		},
+		.driver_data = (void *)&sc31_fpga_resource,
+		.callback = mcb_lpc_create_platform_device,
+	},
 	{}
 };
 MODULE_DEVICE_TABLE(dmi, mcb_lpc_dmi_table);

+ 4 - 2
drivers/mcb/mcb-parse.c

@@ -182,7 +182,7 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
 	int num_cells = 0;
 	uint32_t dtype;
 	int bar_count;
-	int ret = 0;
+	int ret;
 	u32 hsize;
 
 	hsize = sizeof(struct chameleon_fpga_header);
@@ -210,8 +210,10 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
 		 header->filename);
 
 	bar_count = chameleon_get_bar(&p, mapbase, &cb);
-	if (bar_count < 0)
+	if (bar_count < 0) {
+		ret = bar_count;
 		goto free_header;
+	}
 
 	for_each_chameleon_cell(dtype, p) {
 		switch (dtype) {

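With the = 0 initializer gone, every failure path must assign ret explicitly before jumping to the cleanup label, which is why the bar_count error is now captured instead of being masked by a default zero; an uninitialized ret also lets the compiler flag any path that forgets. A generic userspace sketch of this goto-unwind shape, with get_bar as an invented stand-in for a failing helper:

#include <errno.h>
#include <stdlib.h>

static int get_bar(void)
{
	return -EINVAL;		/* stand-in failure */
}

static int parse(void)
{
	int ret;
	char *header = malloc(64);

	if (!header)
		return -ENOMEM;

	ret = get_bar();
	if (ret < 0)
		goto free_header;	/* ret carries the real error */

	ret = 0;			/* success path sets it explicitly */

free_header:
	free(header);
	return ret;
}

int main(void)
{
	return parse() < 0;
}
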
+ 1 - 0
drivers/misc/Makefile

@@ -60,6 +60,7 @@ lkdtm-$(CONFIG_LKDTM)		+= lkdtm_core.o
 lkdtm-$(CONFIG_LKDTM)		+= lkdtm_bugs.o
 lkdtm-$(CONFIG_LKDTM)		+= lkdtm_heap.o
 lkdtm-$(CONFIG_LKDTM)		+= lkdtm_perms.o
+lkdtm-$(CONFIG_LKDTM)		+= lkdtm_refcount.o
 lkdtm-$(CONFIG_LKDTM)		+= lkdtm_rodata_objcopy.o
 lkdtm-$(CONFIG_LKDTM)		+= lkdtm_usercopy.o
 

+ 2 - 2
drivers/misc/apds9802als.c

@@ -197,7 +197,7 @@ static struct attribute *mid_att_als[] = {
 	NULL
 };
 
-static struct attribute_group m_als_gr = {
+static const struct attribute_group m_als_gr = {
 	.name = "apds9802als",
 	.attrs = mid_att_als
 };
@@ -298,7 +298,7 @@ static UNIVERSAL_DEV_PM_OPS(apds9802als_pm_ops, apds9802als_suspend,
 #define APDS9802ALS_PM_OPS NULL
 #endif	/* CONFIG_PM */
 
-static struct i2c_device_id apds9802als_id[] = {
+static const struct i2c_device_id apds9802als_id[] = {
 	{ DRIVER_NAME, 0 },
 	{ }
 };

+ 1 - 1
drivers/misc/apds990x.c

@@ -1051,7 +1051,7 @@ static struct attribute *sysfs_attrs_ctrl[] = {
 	NULL
 };
 
-static struct attribute_group apds990x_attribute_group[] = {
+static const struct attribute_group apds990x_attribute_group[] = {
 	{.attrs = sysfs_attrs_ctrl },
 };
 

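The attribute_group constifications in this series are mechanical: the driver core only reads these tables, so marking them const lets them live in read-only storage and turns any accidental write into a compile error. An illustrative sketch with a simplified struct:

#include <stdio.h>

struct group {
	const char *name;
};

/* const places the table in .rodata; the commented-out assignment
 * below would be rejected at compile time.
 */
static const struct group m_als_gr = {
	.name = "apds9802als",
};

int main(void)
{
	/* m_als_gr.name = "oops"; */
	printf("group: %s\n", m_als_gr.name);
	return 0;
}
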
+ 29 - 5
drivers/misc/aspeed-lpc-snoop.c

@@ -20,6 +20,7 @@
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 
@@ -51,6 +52,13 @@
 #define HICRB_ENSNP0D		BIT(14)
 #define HICRB_ENSNP1D		BIT(15)
 
+struct aspeed_lpc_snoop_model_data {
+	/* The ast2400 has bits 14 and 15 as reserved, whereas the ast2500
+	 * can use them.
+	 */
+	unsigned int has_hicrb_ensnp;
+};
+
 struct aspeed_lpc_snoop {
 	struct regmap		*regmap;
 	int			irq;
@@ -123,10 +131,13 @@ static int aspeed_lpc_snoop_config_irq(struct aspeed_lpc_snoop *lpc_snoop,
 }
 
 static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
-				  int channel, u16 lpc_port)
+				   struct device *dev,
+				   int channel, u16 lpc_port)
 {
 	int rc = 0;
 	u32 hicr5_en, snpwadr_mask, snpwadr_shift, hicrb_en;
+	const struct aspeed_lpc_snoop_model_data *model_data =
+		of_device_get_match_data(dev);
 
 	/* Create FIFO datastructure */
 	rc = kfifo_alloc(&lpc_snoop->snoop_fifo[channel],
@@ -155,7 +166,9 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
 	regmap_update_bits(lpc_snoop->regmap, HICR5, hicr5_en, hicr5_en);
 	regmap_update_bits(lpc_snoop->regmap, SNPWADR, snpwadr_mask,
 			   lpc_port << snpwadr_shift);
-	regmap_update_bits(lpc_snoop->regmap, HICRB, hicrb_en, hicrb_en);
+	if (model_data->has_hicrb_ensnp)
+		regmap_update_bits(lpc_snoop->regmap, HICRB,
+				hicrb_en, hicrb_en);
 
 	return rc;
 }
@@ -213,14 +226,14 @@ static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
 	if (rc)
 		return rc;
 
-	rc = aspeed_lpc_enable_snoop(lpc_snoop, 0, port);
+	rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 0, port);
 	if (rc)
 		return rc;
 
 	/* Configuration of 2nd snoop channel port is optional */
 	if (of_property_read_u32_index(dev->of_node, "snoop-ports",
 				       1, &port) == 0) {
-		rc = aspeed_lpc_enable_snoop(lpc_snoop, 1, port);
+		rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 1, port);
 		if (rc)
 			aspeed_lpc_disable_snoop(lpc_snoop, 0);
 	}
@@ -239,8 +252,19 @@ static int aspeed_lpc_snoop_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct aspeed_lpc_snoop_model_data ast2400_model_data = {
+	.has_hicrb_ensnp = 0,
+};
+
+static const struct aspeed_lpc_snoop_model_data ast2500_model_data = {
+	.has_hicrb_ensnp = 1,
+};
+
 static const struct of_device_id aspeed_lpc_snoop_match[] = {
-	{ .compatible = "aspeed,ast2500-lpc-snoop" },
+	{ .compatible = "aspeed,ast2400-lpc-snoop",
+	  .data = &ast2400_model_data },
+	{ .compatible = "aspeed,ast2500-lpc-snoop",
+	  .data = &ast2500_model_data },
 	{ },
 };
 

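The ast2400/ast2500 split uses the standard of_device_get_match_data() idiom: each compatible string carries a pointer to a per-model capability struct, and probe-time code branches on capabilities rather than on strings, so adding a SoC becomes a table entry. A runnable userspace analogue of the lookup; the struct contents mirror the diff, and get_match_data is a stand-in for the kernel helper:

#include <stdio.h>
#include <string.h>

struct model_data {
	int has_hicrb_ensnp;
};

static const struct model_data ast2400_model = { .has_hicrb_ensnp = 0 };
static const struct model_data ast2500_model = { .has_hicrb_ensnp = 1 };

static const struct {
	const char *compatible;
	const struct model_data *data;
} match_table[] = {
	{ "aspeed,ast2400-lpc-snoop", &ast2400_model },
	{ "aspeed,ast2500-lpc-snoop", &ast2500_model },
	{ NULL, NULL },
};

/* Userspace stand-in for of_device_get_match_data(). */
static const struct model_data *get_match_data(const char *compat)
{
	int i;

	for (i = 0; match_table[i].compatible; i++)
		if (!strcmp(match_table[i].compatible, compat))
			return match_table[i].data;
	return NULL;
}

int main(void)
{
	const struct model_data *md = get_match_data("aspeed,ast2500-lpc-snoop");

	printf("HICRB snoop enables usable: %d\n", md ? md->has_hicrb_ensnp : -1);
	return 0;
}
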
+ 1 - 1
drivers/misc/bh1770glc.c

@@ -1175,7 +1175,7 @@ static struct attribute *sysfs_attrs[] = {
 	NULL
 };
 
-static struct attribute_group bh1770_attribute_group = {
+static const struct attribute_group bh1770_attribute_group = {
 	.attrs = sysfs_attrs
 };
 

Some files were not shown because too many files changed in this diff