
Merge tag 'mac80211-next-for-davem-2016-02-26' into next2

Here's another round of updates for -next:
 * big A-MSDU RX performance improvement (avoid linearize of paged RX)
 * rfkill changes: cleanups, documentation, platform properties
 * basic PBSS support in cfg80211
 * MU-MIMO action frame processing support
 * BlockAck reordering & duplicate detection offload support
 * various cleanups & little fixes
Emmanuel Grumbach 9 years ago
parent
commit
51bcc7386a
100 files changed with 3835 additions and 2175 deletions
  1. 0 29
      Documentation/ABI/obsolete/sysfs-class-rfkill
  2. 13 0
      Documentation/ABI/removed/sysfs-class-rfkill
  3. 24 3
      Documentation/ABI/stable/sysfs-class-rfkill
  4. 0 2
      Documentation/cgroup-v1/00-INDEX
  5. 1 81
      Documentation/cgroup-v1/blkio-controller.txt
  6. 0 0
      Documentation/cgroup-v1/cgroups.txt
  7. 0 0
      Documentation/cgroup-v1/cpuacct.txt
  8. 0 0
      Documentation/cgroup-v1/cpusets.txt
  9. 0 0
      Documentation/cgroup-v1/devices.txt
  10. 0 0
      Documentation/cgroup-v1/freezer-subsystem.txt
  11. 0 0
      Documentation/cgroup-v1/hugetlb.txt
  12. 0 0
      Documentation/cgroup-v1/memcg_test.txt
  13. 0 0
      Documentation/cgroup-v1/memory.txt
  14. 0 0
      Documentation/cgroup-v1/net_cls.txt
  15. 0 0
      Documentation/cgroup-v1/net_prio.txt
  16. 0 0
      Documentation/cgroup-v1/pids.txt
  17. 1293 0
      Documentation/cgroup-v2.txt
  18. 0 647
      Documentation/cgroups/unified-hierarchy.txt
  19. 199 42
      Documentation/cpu-freq/intel-pstate.txt
  20. 2 2
      Documentation/cpu-freq/pcc-cpufreq.txt
  21. 17 0
      Documentation/devicetree/bindings/arm/cpus.txt
  22. 91 0
      Documentation/devicetree/bindings/cpufreq/cpufreq-st.txt
  23. 93 39
      Documentation/devicetree/bindings/opp/opp.txt
  24. 17 0
      Documentation/networking/mac80211-injection.txt
  25. 1 1
      Documentation/power/pci.txt
  26. 6 0
      Documentation/power/runtime_pm.txt
  27. 2 0
      Documentation/rfkill.txt
  28. 11 0
      MAINTAINERS
  29. 14 14
      arch/arm/boot/dts/exynos4412.dtsi
  30. 10 7
      arch/arm/mach-tegra/board-paz00.c
  31. 5 7
      arch/ia64/kernel/ftrace.c
  32. 5 6
      arch/metag/kernel/ftrace.c
  33. 5 7
      arch/sh/kernel/ftrace.c
  34. 2 1
      arch/x86/Kconfig
  35. 13 38
      arch/x86/include/asm/iosf_mbi.h
  36. 14 7
      arch/x86/kernel/ftrace.c
  37. 2 5
      arch/x86/platform/atom/punit_atom_debug.c
  38. 10 18
      arch/x86/platform/intel-quark/imr.c
  39. 14 3
      drivers/acpi/Kconfig
  40. 5 4
      drivers/acpi/Makefile
  41. 15 1
      drivers/acpi/acpi_apd.c
  42. 804 0
      drivers/acpi/acpi_dbg.c
  43. 198 15
      drivers/acpi/acpi_lpss.c
  44. 1 1
      drivers/acpi/acpi_pnp.c
  45. 61 15
      drivers/acpi/acpi_video.c
  46. 2 2
      drivers/acpi/acpica/Makefile
  47. 32 26
      drivers/acpi/acpica/acapps.h
  48. 26 15
      drivers/acpi/acpica/acdebug.h
  49. 9 2
      drivers/acpi/acpica/acevents.h
  50. 2 6
      drivers/acpi/acpica/acglobal.h
  51. 11 1
      drivers/acpi/acpica/aclocal.h
  52. 0 11
      drivers/acpi/acpica/acmacros.h
  53. 10 2
      drivers/acpi/acpica/acnamesp.h
  54. 4 3
      drivers/acpi/acpica/acobject.h
  55. 5 5
      drivers/acpi/acpica/acopcode.h
  56. 7 1
      drivers/acpi/acpica/acparser.h
  57. 7 19
      drivers/acpi/acpica/acutils.h
  58. 3 2
      drivers/acpi/acpica/amlcode.h
  59. 3 8
      drivers/acpi/acpica/dbcmds.c
  60. 46 50
      drivers/acpi/acpica/dbdisply.c
  61. 15 108
      drivers/acpi/acpica/dbfileio.c
  62. 34 88
      drivers/acpi/acpica/dbinput.c
  63. 1 1
      drivers/acpi/acpica/dbnames.c
  64. 1 0
      drivers/acpi/acpica/dbstats.c
  65. 1 1
      drivers/acpi/acpica/dbtest.c
  66. 1 0
      drivers/acpi/acpica/dbutils.c
  67. 48 45
      drivers/acpi/acpica/dbxface.c
  68. 4 3
      drivers/acpi/acpica/dsargs.c
  69. 2 8
      drivers/acpi/acpica/dscontrol.c
  70. 3 2
      drivers/acpi/acpica/dsdebug.c
  71. 20 19
      drivers/acpi/acpica/dsfield.c
  72. 1 1
      drivers/acpi/acpica/dsinit.c
  73. 27 12
      drivers/acpi/acpica/dsmethod.c
  74. 11 9
      drivers/acpi/acpica/dsmthdat.c
  75. 11 8
      drivers/acpi/acpica/dsobject.c
  76. 12 9
      drivers/acpi/acpica/dsopcode.c
  77. 22 23
      drivers/acpi/acpica/dsutils.c
  78. 17 18
      drivers/acpi/acpica/dswexec.c
  79. 3 7
      drivers/acpi/acpica/dswload.c
  80. 5 5
      drivers/acpi/acpica/dswload2.c
  81. 1 0
      drivers/acpi/acpica/dswscope.c
  82. 1 1
      drivers/acpi/acpica/evgpe.c
  83. 1 0
      drivers/acpi/acpica/evgpeblk.c
  84. 1 0
      drivers/acpi/acpica/evgpeutil.c
  85. 95 70
      drivers/acpi/acpica/evhandler.c
  86. 3 2
      drivers/acpi/acpica/evmisc.c
  87. 92 22
      drivers/acpi/acpica/evregion.c
  88. 35 80
      drivers/acpi/acpica/evrgnini.c
  89. 4 5
      drivers/acpi/acpica/evxface.c
  90. 3 35
      drivers/acpi/acpica/evxfregn.c
  91. 4 4
      drivers/acpi/acpica/exconfig.c
  92. 4 5
      drivers/acpi/acpica/exconvrt.c
  93. 12 8
      drivers/acpi/acpica/excreate.c
  94. 56 347
      drivers/acpi/acpica/exdebug.c
  95. 4 2
      drivers/acpi/acpica/exdump.c
  96. 42 32
      drivers/acpi/acpica/exfield.c
  97. 20 15
      drivers/acpi/acpica/exfldio.c
  98. 41 8
      drivers/acpi/acpica/exmisc.c
  99. 60 22
      drivers/acpi/acpica/exmutex.c
  100. 2 2
      drivers/acpi/acpica/exnames.c

+ 0 - 29
Documentation/ABI/obsolete/sysfs-class-rfkill

@@ -1,29 +0,0 @@
-rfkill - radio frequency (RF) connector kill switch support
-
-For details to this subsystem look at Documentation/rfkill.txt.
-
-What:		/sys/class/rfkill/rfkill[0-9]+/state
-Date:		09-Jul-2007
-KernelVersion	v2.6.22
-Contact:	linux-wireless@vger.kernel.org
-Description: 	Current state of the transmitter.
-		This file is deprecated and scheduled to be removed in 2014,
-		because its not possible to express the 'soft and hard block'
-		state of the rfkill driver.
-Values: 	A numeric value.
-		0: RFKILL_STATE_SOFT_BLOCKED
-			transmitter is turned off by software
-		1: RFKILL_STATE_UNBLOCKED
-			transmitter is (potentially) active
-		2: RFKILL_STATE_HARD_BLOCKED
-			transmitter is forced off by something outside of
-			the driver's control.
-
-What:		/sys/class/rfkill/rfkill[0-9]+/claim
-Date:		09-Jul-2007
-KernelVersion	v2.6.22
-Contact:	linux-wireless@vger.kernel.org
-Description:	This file is deprecated because there no longer is a way to
-		claim just control over a single rfkill instance.
-		This file is scheduled to be removed in 2012.
-Values: 	0: Kernel handles events

+ 13 - 0
Documentation/ABI/removed/sysfs-class-rfkill

@@ -0,0 +1,13 @@
+rfkill - radio frequency (RF) connector kill switch support
+
+For details to this subsystem look at Documentation/rfkill.txt.
+
+What:		/sys/class/rfkill/rfkill[0-9]+/claim
+Date:		09-Jul-2007
+KernelVersion	v2.6.22
+Contact:	linux-wireless@vger.kernel.org
+Description:	This file was deprecated because there no longer was a way to
+		claim just control over a single rfkill instance.
+		This file was scheduled to be removed in 2012, and was removed
+		in 2016.
+Values: 	0: Kernel handles events

+ 24 - 3
Documentation/ABI/stable/sysfs-class-rfkill

@@ -2,9 +2,8 @@ rfkill - radio frequency (RF) connector kill switch support
 
 For details to this subsystem look at Documentation/rfkill.txt.
 
-For the deprecated /sys/class/rfkill/*/state and
-/sys/class/rfkill/*/claim knobs of this interface look in
-Documentation/ABI/obsolete/sysfs-class-rfkill.
+For the deprecated /sys/class/rfkill/*/claim knobs of this interface look in
+Documentation/ABI/removed/sysfs-class-rfkill.
 
 What: 		/sys/class/rfkill
 Date:		09-Jul-2007
@@ -42,6 +41,28 @@ Values: 	A numeric value.
 		1: true
 
 
+What:		/sys/class/rfkill/rfkill[0-9]+/state
+Date:		09-Jul-2007
+KernelVersion	v2.6.22
+Contact:	linux-wireless@vger.kernel.org
+Description: 	Current state of the transmitter.
+		This file was scheduled to be removed in 2014, but due to its
+		large number of users it will be sticking around for a bit
+		longer. Despite it being marked as stable, the newer "hard" and
+		"soft" interfaces should be preferred, since it is not possible
+		to express the 'soft and hard block' state of the rfkill driver
+		through this interface. There will likely be another attempt to
+		remove it in the future.
+Values: 	A numeric value.
+		0: RFKILL_STATE_SOFT_BLOCKED
+			transmitter is turned off by software
+		1: RFKILL_STATE_UNBLOCKED
+			transmitter is (potentially) active
+		2: RFKILL_STATE_HARD_BLOCKED
+			transmitter is forced off by something outside of
+			the driver's control.
+
+
 What:		/sys/class/rfkill/rfkill[0-9]+/hard
 Date:		12-March-2010
 KernelVersion	v2.6.34
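
The "state" attribute described above is a plain numeric sysfs file, so a
userspace consumer only needs to read one integer and map it to the three
named constants. A minimal C sketch, assuming instance rfkill0 and a local
state_name() helper (both illustrative, not part of the ABI):

  #include <stdio.h>
  #include <stdlib.h>

  static const char *state_name(int state)
  {
          switch (state) {
          case 0: return "RFKILL_STATE_SOFT_BLOCKED";
          case 1: return "RFKILL_STATE_UNBLOCKED";
          case 2: return "RFKILL_STATE_HARD_BLOCKED";
          default: return "unknown";
          }
  }

  int main(void)
  {
          FILE *f = fopen("/sys/class/rfkill/rfkill0/state", "r");
          int state;

          if (!f || fscanf(f, "%d", &state) != 1) {
                  perror("rfkill0/state");
                  return EXIT_FAILURE;
          }
          fclose(f);
          printf("rfkill0: %d (%s)\n", state, state_name(state));
          return EXIT_SUCCESS;
  }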

+ 0 - 2
Documentation/cgroups/00-INDEX → Documentation/cgroup-v1/00-INDEX

@@ -24,7 +24,5 @@ net_prio.txt
 	- Network priority cgroups details and usages.
 pids.txt
 	- Process number cgroups details and usages.
-resource_counter.txt
-	- Resource Counter API.
 unified-hierarchy.txt
 	- Description the new/next cgroup interface.

+ 1 - 81
Documentation/cgroups/blkio-controller.txt → Documentation/cgroup-v1/blkio-controller.txt

@@ -84,8 +84,7 @@ Throttling/Upper Limit policy
 
 - Run dd to read a file and see if rate is throttled to 1MB/s or not.
 
-		# dd if=/mnt/common/zerofile of=/dev/null bs=4K count=1024
-		# iflag=direct
+        # dd iflag=direct if=/mnt/common/zerofile of=/dev/null bs=4K count=1024
         1024+0 records in
         1024+0 records out
         4194304 bytes (4.2 MB) copied, 4.0001 s, 1.0 MB/s
@@ -374,82 +373,3 @@ One can experience an overall throughput drop if you have created multiple
 groups and put applications in that group which are not driving enough
 IO to keep disk busy. In that case set group_idle=0, and CFQ will not idle
 on individual groups and throughput should improve.
-
-Writeback
-=========
-
-Page cache is dirtied through buffered writes and shared mmaps and
-written asynchronously to the backing filesystem by the writeback
-mechanism.  Writeback sits between the memory and IO domains and
-regulates the proportion of dirty memory by balancing dirtying and
-write IOs.
-
-On traditional cgroup hierarchies, relationships between different
-controllers cannot be established making it impossible for writeback
-to operate accounting for cgroup resource restrictions and all
-writeback IOs are attributed to the root cgroup.
-
-If both the blkio and memory controllers are used on the v2 hierarchy
-and the filesystem supports cgroup writeback, writeback operations
-correctly follow the resource restrictions imposed by both memory and
-blkio controllers.
-
-Writeback examines both system-wide and per-cgroup dirty memory status
-and enforces the more restrictive of the two.  Also, writeback control
-parameters which are absolute values - vm.dirty_bytes and
-vm.dirty_background_bytes - are distributed across cgroups according
-to their current writeback bandwidth.
-
-There's a peculiarity stemming from the discrepancy in ownership
-granularity between memory controller and writeback.  While memory
-controller tracks ownership per page, writeback operates on inode
-basis.  cgroup writeback bridges the gap by tracking ownership by
-inode but migrating ownership if too many foreign pages, pages which
-don't match the current inode ownership, have been encountered while
-writing back the inode.
-
-This is a conscious design choice as writeback operations are
-inherently tied to inodes making strictly following page ownership
-complicated and inefficient.  The only use case which suffers from
-this compromise is multiple cgroups concurrently dirtying disjoint
-regions of the same inode, which is an unlikely use case and decided
-to be unsupported.  Note that as memory controller assigns page
-ownership on the first use and doesn't update it until the page is
-released, even if cgroup writeback strictly follows page ownership,
-multiple cgroups dirtying overlapping areas wouldn't work as expected.
-In general, write-sharing an inode across multiple cgroups is not well
-supported.
-
-Filesystem support for cgroup writeback
----------------------------------------
-
-A filesystem can make writeback IOs cgroup-aware by updating
-address_space_operations->writepage[s]() to annotate bio's using the
-following two functions.
-
-* wbc_init_bio(@wbc, @bio)
-
-  Should be called for each bio carrying writeback data and associates
-  the bio with the inode's owner cgroup.  Can be called anytime
-  between bio allocation and submission.
-
-* wbc_account_io(@wbc, @page, @bytes)
-
-  Should be called for each data segment being written out.  While
-  this function doesn't care exactly when it's called during the
-  writeback session, it's the easiest and most natural to call it as
-  data segments are added to a bio.
-
-With writeback bio's annotated, cgroup support can be enabled per
-super_block by setting MS_CGROUPWB in ->s_flags.  This allows for
-selective disabling of cgroup writeback support which is helpful when
-certain filesystem features, e.g. journaled data mode, are
-incompatible.
-
-wbc_init_bio() binds the specified bio to its cgroup.  Depending on
-the configuration, the bio may be executed at a lower priority and if
-the writeback session is holding shared resources, e.g. a journal
-entry, may lead to priority inversion.  There is no one easy solution
-for the problem.  Filesystems can try to work around specific problem
-cases by skipping wbc_init_bio() or using bio_associate_blkcg()
-directly.

+ 0 - 0
Documentation/cgroups/cgroups.txt → Documentation/cgroup-v1/cgroups.txt


+ 0 - 0
Documentation/cgroups/cpuacct.txt → Documentation/cgroup-v1/cpuacct.txt


+ 0 - 0
Documentation/cgroups/cpusets.txt → Documentation/cgroup-v1/cpusets.txt


+ 0 - 0
Documentation/cgroups/devices.txt → Documentation/cgroup-v1/devices.txt


+ 0 - 0
Documentation/cgroups/freezer-subsystem.txt → Documentation/cgroup-v1/freezer-subsystem.txt


+ 0 - 0
Documentation/cgroups/hugetlb.txt → Documentation/cgroup-v1/hugetlb.txt


+ 0 - 0
Documentation/cgroups/memcg_test.txt → Documentation/cgroup-v1/memcg_test.txt


+ 0 - 0
Documentation/cgroups/memory.txt → Documentation/cgroup-v1/memory.txt


+ 0 - 0
Documentation/cgroups/net_cls.txt → Documentation/cgroup-v1/net_cls.txt


+ 0 - 0
Documentation/cgroups/net_prio.txt → Documentation/cgroup-v1/net_prio.txt


+ 0 - 0
Documentation/cgroups/pids.txt → Documentation/cgroup-v1/pids.txt


+ 1293 - 0
Documentation/cgroup-v2.txt

@@ -0,0 +1,1293 @@
+
+Control Group v2
+
+October, 2015		Tejun Heo <tj@kernel.org>
+
+This is the authoritative documentation on the design, interface and
+conventions of cgroup v2.  It describes all userland-visible aspects
+of cgroup including core and specific controller behaviors.  All
+future changes must be reflected in this document.  Documentation for
+v1 is available under Documentation/cgroup-v1/.
+
+CONTENTS
+
+1. Introduction
+  1-1. Terminology
+  1-2. What is cgroup?
+2. Basic Operations
+  2-1. Mounting
+  2-2. Organizing Processes
+  2-3. [Un]populated Notification
+  2-4. Controlling Controllers
+    2-4-1. Enabling and Disabling
+    2-4-2. Top-down Constraint
+    2-4-3. No Internal Process Constraint
+  2-5. Delegation
+    2-5-1. Model of Delegation
+    2-5-2. Delegation Containment
+  2-6. Guidelines
+    2-6-1. Organize Once and Control
+    2-6-2. Avoid Name Collisions
+3. Resource Distribution Models
+  3-1. Weights
+  3-2. Limits
+  3-3. Protections
+  3-4. Allocations
+4. Interface Files
+  4-1. Format
+  4-2. Conventions
+  4-3. Core Interface Files
+5. Controllers
+  5-1. CPU
+    5-1-1. CPU Interface Files
+  5-2. Memory
+    5-2-1. Memory Interface Files
+    5-2-2. Usage Guidelines
+    5-2-3. Memory Ownership
+  5-3. IO
+    5-3-1. IO Interface Files
+    5-3-2. Writeback
+P. Information on Kernel Programming
+  P-1. Filesystem Support for Writeback
+D. Deprecated v1 Core Features
+R. Issues with v1 and Rationales for v2
+  R-1. Multiple Hierarchies
+  R-2. Thread Granularity
+  R-3. Competition Between Inner Nodes and Threads
+  R-4. Other Interface Issues
+  R-5. Controller Issues and Remedies
+    R-5-1. Memory
+
+
+1. Introduction
+
+1-1. Terminology
+
+"cgroup" stands for "control group" and is never capitalized.  The
+singular form is used to designate the whole feature and also as a
+qualifier as in "cgroup controllers".  When explicitly referring to
+multiple individual control groups, the plural form "cgroups" is used.
+
+
+1-2. What is cgroup?
+
+cgroup is a mechanism to organize processes hierarchically and
+distribute system resources along the hierarchy in a controlled and
+configurable manner.
+
+cgroup is largely composed of two parts - the core and controllers.
+cgroup core is primarily responsible for hierarchically organizing
+processes.  A cgroup controller is usually responsible for
+distributing a specific type of system resource along the hierarchy
+although there are utility controllers which serve purposes other than
+resource distribution.
+
+cgroups form a tree structure and every process in the system belongs
+to one and only one cgroup.  All threads of a process belong to the
+same cgroup.  On creation, all processes are put in the cgroup that
+the parent process belongs to at the time.  A process can be migrated
+to another cgroup.  Migration of a process doesn't affect already
+existing descendant processes.
+
+Following certain structural constraints, controllers may be enabled or
+disabled selectively on a cgroup.  All controller behaviors are
+hierarchical - if a controller is enabled on a cgroup, it affects all
+processes which belong to the cgroups consisting the inclusive
+sub-hierarchy of the cgroup.  When a controller is enabled on a nested
+cgroup, it always restricts the resource distribution further.  The
+restrictions set closer to the root in the hierarchy can not be
+overridden from further away.
+
+
+2. Basic Operations
+
+2-1. Mounting
+
+Unlike v1, cgroup v2 has only single hierarchy.  The cgroup v2
+hierarchy can be mounted with the following mount command.
+
+  # mount -t cgroup2 none $MOUNT_POINT
+
+cgroup2 filesystem has the magic number 0x63677270 ("cgrp").  All
+controllers which support v2 and are not bound to a v1 hierarchy are
+automatically bound to the v2 hierarchy and show up at the root.
+Controllers which are not in active use in the v2 hierarchy can be
+bound to other hierarchies.  This allows mixing v2 hierarchy with the
+legacy v1 multiple hierarchies in a fully backward compatible way.
+
+A controller can be moved across hierarchies only after the controller
+is no longer referenced in its current hierarchy.  Because per-cgroup
+controller states are destroyed asynchronously and controllers may
+have lingering references, a controller may not show up immediately on
+the v2 hierarchy after the final umount of the previous hierarchy.
+Similarly, a controller should be fully disabled to be moved out of
+the unified hierarchy and it may take some time for the disabled
+controller to become available for other hierarchies; furthermore, due
+to inter-controller dependencies, other controllers may need to be
+disabled too.
+
+While useful for development and manual configurations, moving
+controllers dynamically between the v2 and other hierarchies is
+strongly discouraged for production use.  It is recommended to decide
+the hierarchies and controller associations before starting using the
+controllers after system boot.
+
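
Beyond mount(8), a program can verify that a given mount point really is a
cgroup2 filesystem by comparing the magic number quoted above. A minimal C
sketch using statfs(2); the path to check comes from the command line:

  #include <stdio.h>
  #include <sys/vfs.h>

  #define CGROUP2_MAGIC 0x63677270  /* "cgrp", as given in the text above */

  int main(int argc, char **argv)
  {
          struct statfs fs;

          if (argc < 2 || statfs(argv[1], &fs) != 0) {
                  perror("statfs");
                  return 1;
          }
          printf("%s is %sa cgroup2 mount\n", argv[1],
                 (unsigned long)fs.f_type == CGROUP2_MAGIC ? "" : "not ");
          return 0;
  }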
+
+2-2. Organizing Processes
+
+Initially, only the root cgroup exists to which all processes belong.
+A child cgroup can be created by creating a sub-directory.
+
+  # mkdir $CGROUP_NAME
+
+A given cgroup may have multiple child cgroups forming a tree
+structure.  Each cgroup has a read-writable interface file
+"cgroup.procs".  When read, it lists the PIDs of all processes which
+belong to the cgroup one-per-line.  The PIDs are not ordered and the
+same PID may show up more than once if the process got moved to
+another cgroup and then back or the PID got recycled while reading.
+
+A process can be migrated into a cgroup by writing its PID to the
+target cgroup's "cgroup.procs" file.  Only one process can be migrated
+on a single write(2) call.  If a process is composed of multiple
+threads, writing the PID of any thread migrates all threads of the
+process.
+
+When a process forks a child process, the new process is born into the
+cgroup that the forking process belongs to at the time of the
+operation.  After exit, a process stays associated with the cgroup
+that it belonged to at the time of exit until it's reaped; however, a
+zombie process does not appear in "cgroup.procs" and thus can't be
+moved to another cgroup.
+
+A cgroup which doesn't have any children or live processes can be
+destroyed by removing the directory.  Note that a cgroup which doesn't
+have any children and is associated only with zombie processes is
+considered empty and can be removed.
+
+  # rmdir $CGROUP_NAME
+
+"/proc/$PID/cgroup" lists a process's cgroup membership.  If legacy
+cgroup is in use in the system, this file may contain multiple lines,
+one for each hierarchy.  The entry for cgroup v2 is always in the
+format "0::$PATH".
+
+  # cat /proc/842/cgroup
+  ...
+  0::/test-cgroup/test-cgroup-nested
+
+If the process becomes a zombie and the cgroup it was associated with
+is removed subsequently, " (deleted)" is appended to the path.
+
+  # cat /proc/842/cgroup
+  ...
+  0::/test-cgroup/test-cgroup-nested (deleted)
+
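
The mkdir and "cgroup.procs" steps above map directly onto a handful of
system calls. A minimal C sketch, assuming the hierarchy is mounted at
/sys/fs/cgroup and using a hypothetical "test-cgroup" name, that creates a
child cgroup and migrates the calling process into it:

  #include <errno.h>
  #include <stdio.h>
  #include <sys/stat.h>
  #include <unistd.h>

  int main(void)
  {
          const char *dir = "/sys/fs/cgroup/test-cgroup";
          char path[256];
          FILE *f;

          if (mkdir(dir, 0755) && errno != EEXIST) {
                  perror("mkdir");
                  return 1;
          }
          snprintf(path, sizeof(path), "%s/cgroup.procs", dir);
          f = fopen(path, "w");
          if (!f) {
                  perror("cgroup.procs");
                  return 1;
          }
          /* Writing any thread's PID migrates the whole process. */
          fprintf(f, "%d\n", getpid());
          if (fclose(f) != 0) {
                  perror("cgroup.procs");
                  return 1;
          }
          return 0;
  }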
+
+2-3. [Un]populated Notification
+
+Each non-root cgroup has a "cgroup.events" file which contains
+"populated" field indicating whether the cgroup's sub-hierarchy has
+live processes in it.  Its value is 0 if there is no live process in
+the cgroup and its descendants; otherwise, 1.  poll and [id]notify
+events are triggered when the value changes.  This can be used, for
+example, to start a clean-up operation after all processes of a given
+sub-hierarchy have exited.  The populated state updates and
+notifications are recursive.  Consider the following sub-hierarchy
+where the numbers in the parentheses represent the numbers of processes
+in each cgroup.
+
+  A(4) - B(0) - C(1)
+              \ D(0)
+
+A, B and C's "populated" fields would be 1 while D's 0.  After the one
+process in C exits, B and C's "populated" fields would flip to "0" and
+file modified events will be generated on the "cgroup.events" files of
+both cgroups.
+
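
Because changes to "cgroup.events" are announced as file modified events, a
management daemon can block on inotify and re-read the populated field each
time it fires. A sketch under the same assumed /sys/fs/cgroup/test-cgroup
path:

  #include <stdio.h>
  #include <string.h>
  #include <sys/inotify.h>
  #include <unistd.h>

  int main(void)
  {
          const char *path = "/sys/fs/cgroup/test-cgroup/cgroup.events";
          char buf[4096], line[64];
          int fd = inotify_init();

          if (fd < 0 || inotify_add_watch(fd, path, IN_MODIFY) < 0) {
                  perror("inotify");
                  return 1;
          }
          for (;;) {
                  FILE *f;

                  /* Block until the kernel reports a modification. */
                  if (read(fd, buf, sizeof(buf)) <= 0)
                          break;
                  f = fopen(path, "r");
                  while (f && fgets(line, sizeof(line), f))
                          if (!strncmp(line, "populated", 9))
                                  fputs(line, stdout);
                  if (f)
                          fclose(f);
          }
          return 0;
  }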
+
+2-4. Controlling Controllers
+
+2-4-1. Enabling and Disabling
+
+Each cgroup has a "cgroup.controllers" file which lists all
+controllers available for the cgroup to enable.
+
+  # cat cgroup.controllers
+  cpu io memory
+
+No controller is enabled by default.  Controllers can be enabled and
+disabled by writing to the "cgroup.subtree_control" file.
+
+  # echo "+cpu +memory -io" > cgroup.subtree_control
+
+Only controllers which are listed in "cgroup.controllers" can be
+enabled.  When multiple operations are specified as above, either they
+all succeed or fail.  If multiple operations on the same controller
+are specified, the last one is effective.
+
+Enabling a controller in a cgroup indicates that the distribution of
+the target resource across its immediate children will be controlled.
+Consider the following sub-hierarchy.  The enabled controllers are
+listed in parentheses.
+
+  A(cpu,memory) - B(memory) - C()
+                            \ D()
+
+As A has "cpu" and "memory" enabled, A will control the distribution
+of CPU cycles and memory to its children, in this case, B.  As B has
+"memory" enabled but not "CPU", C and D will compete freely on CPU
+cycles but their division of memory available to B will be controlled.
+
+As a controller regulates the distribution of the target resource to
+the cgroup's children, enabling it creates the controller's interface
+files in the child cgroups.  In the above example, enabling "cpu" on B
+would create the "cpu." prefixed controller interface files in C and
+D.  Likewise, disabling "memory" from B would remove the "memory."
+prefixed controller interface files from C and D.  This means that the
+controller interface files - anything which doesn't start with
+"cgroup." are owned by the parent rather than the cgroup itself.
+
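
The same enable/disable write can be issued from a program instead of echo.
A small C sketch, assuming a /sys/fs/cgroup/test-cgroup path; note that
stdio buffers the write, so the all-or-nothing result of the operation only
becomes visible when the stream is flushed or closed:

  #include <stdio.h>

  int main(void)
  {
          FILE *f = fopen("/sys/fs/cgroup/test-cgroup/cgroup.subtree_control", "w");

          if (!f) {
                  perror("cgroup.subtree_control");
                  return 1;
          }
          fprintf(f, "+memory +io\n");
          /* The kernel applies the operations all-or-nothing; any error is
           * reported when the buffered write is flushed on close. */
          if (fclose(f) != 0) {
                  perror("cgroup.subtree_control");
                  return 1;
          }
          return 0;
  }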
+
+2-4-2. Top-down Constraint
+
+Resources are distributed top-down and a cgroup can further distribute
+a resource only if the resource has been distributed to it from the
+parent.  This means that all non-root "cgroup.subtree_control" files
+can only contain controllers which are enabled in the parent's
+"cgroup.subtree_control" file.  A controller can be enabled only if
+the parent has the controller enabled and a controller can't be
+disabled if one or more children have it enabled.
+
+
+2-4-3. No Internal Process Constraint
+
+Non-root cgroups can only distribute resources to their children when
+they don't have any processes of their own.  In other words, only
+cgroups which don't contain any processes can have controllers enabled
+in their "cgroup.subtree_control" files.
+
+This guarantees that, when a controller is looking at the part of the
+hierarchy which has it enabled, processes are always only on the
+leaves.  This rules out situations where child cgroups compete against
+internal processes of the parent.
+
+The root cgroup is exempt from this restriction.  Root contains
+processes and anonymous resource consumption which can't be associated
+with any other cgroups and requires special treatment from most
+controllers.  How resource consumption in the root cgroup is governed
+is up to each controller.
+
+Note that the restriction doesn't get in the way if there is no
+enabled controller in the cgroup's "cgroup.subtree_control".  This is
+important as otherwise it wouldn't be possible to create children of a
+populated cgroup.  To control resource distribution of a cgroup, the
+cgroup must create children and transfer all its processes to the
+children before enabling controllers in its "cgroup.subtree_control"
+file.
+
+
+2-5. Delegation
+
+2-5-1. Model of Delegation
+
+A cgroup can be delegated to a less privileged user by granting write
+access of the directory and its "cgroup.procs" file to the user.  Note
+that resource control interface files in a given directory control the
+distribution of the parent's resources and thus must not be delegated
+along with the directory.
+
+Once delegated, the user can build sub-hierarchy under the directory,
+organize processes as it sees fit and further distribute the resources
+it received from the parent.  The limits and other settings of all
+resource controllers are hierarchical and regardless of what happens
+in the delegated sub-hierarchy, nothing can escape the resource
+restrictions imposed by the parent.
+
+Currently, cgroup doesn't impose any restrictions on the number of
+cgroups in or nesting depth of a delegated sub-hierarchy; however,
+this may be limited explicitly in the future.
+
+
+2-5-2. Delegation Containment
+
+A delegated sub-hierarchy is contained in the sense that processes
+can't be moved into or out of the sub-hierarchy by the delegatee.  For
+a process with a non-root euid to migrate a target process into a
+cgroup by writing its PID to the "cgroup.procs" file, the following
+conditions must be met.
+
+- The writer's euid must match either uid or suid of the target process.
+
+- The writer must have write access to the "cgroup.procs" file.
+
+- The writer must have write access to the "cgroup.procs" file of the
+  common ancestor of the source and destination cgroups.
+
+The above three constraints ensure that while a delegatee may migrate
+processes around freely in the delegated sub-hierarchy it can't pull
+in from or push out to outside the sub-hierarchy.
+
+For an example, let's assume cgroups C0 and C1 have been delegated to
+user U0 who created C00, C01 under C0 and C10 under C1 as follows and
+all processes under C0 and C1 belong to U0.
+
+  ~~~~~~~~~~~~~ - C0 - C00
+  ~ cgroup    ~      \ C01
+  ~ hierarchy ~
+  ~~~~~~~~~~~~~ - C1 - C10
+
+Let's also say U0 wants to write the PID of a process which is
+currently in C10 into "C00/cgroup.procs".  U0 has write access to the
+file and uid match on the process; however, the common ancestor of the
+source cgroup C10 and the destination cgroup C00 is above the points
+of delegation and U0 would not have write access to its "cgroup.procs"
+files and thus the write will be denied with -EACCES.
+
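
In practice the delegation described above is a matter of ownership on the
directory and its "cgroup.procs" file; the resource control interface files
stay owned by the delegater. A sketch using chown(2), with uid/gid 1000 and
the C0 path as placeholders:

  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          /* Only the directory and cgroup.procs change owner; the resource
           * control files keep belonging to the parent's owner. */
          if (chown("/sys/fs/cgroup/C0", 1000, 1000) ||
              chown("/sys/fs/cgroup/C0/cgroup.procs", 1000, 1000)) {
                  perror("chown");
                  return 1;
          }
          return 0;
  }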
+
+2-6. Guidelines
+
+2-6-1. Organize Once and Control
+
+Migrating a process across cgroups is a relatively expensive operation
+and stateful resources such as memory are not moved together with the
+process.  This is an explicit design decision as there often exist
+inherent trade-offs between migration and various hot paths in terms
+of synchronization cost.
+
+As such, migrating processes across cgroups frequently as a means to
+apply different resource restrictions is discouraged.  A workload
+should be assigned to a cgroup according to the system's logical and
+resource structure once on start-up.  Dynamic adjustments to resource
+distribution can be made by changing controller configuration through
+the interface files.
+
+
+2-6-2. Avoid Name Collisions
+
+Interface files for a cgroup and its children cgroups occupy the same
+directory and it is possible to create children cgroups which collide
+with interface files.
+
+All cgroup core interface files are prefixed with "cgroup." and each
+controller's interface files are prefixed with the controller name and
+a dot.  A controller's name is composed of lower case alphabets and
+'_'s but never begins with an '_' so it can be used as the prefix
+character for collision avoidance.  Also, interface file names won't
+start or end with terms which are often used in categorizing workloads
+such as job, service, slice, unit or workload.
+
+cgroup doesn't do anything to prevent name collisions and it's the
+user's responsibility to avoid them.
+
+
+3. Resource Distribution Models
+
+cgroup controllers implement several resource distribution schemes
+depending on the resource type and expected use cases.  This section
+describes major schemes in use along with their expected behaviors.
+
+
+3-1. Weights
+
+A parent's resource is distributed by adding up the weights of all
+active children and giving each the fraction matching the ratio of its
+weight against the sum.  As only children which can make use of the
+resource at the moment participate in the distribution, this is
+work-conserving.  Due to the dynamic nature, this model is usually
+used for stateless resources.
+
+All weights are in the range [1, 10000] with the default at 100.  This
+allows symmetric multiplicative biases in both directions at fine
+enough granularity while staying in the intuitive range.
+
+As long as the weight is in range, all configuration combinations are
+valid and there is no reason to reject configuration changes or
+process migrations.
+
+"cpu.weight" proportionally distributes CPU cycles to active children
+and is an example of this type.
+
+
+3-2. Limits
+
+A child can only consume upto the configured amount of the resource.
+Limits can be over-committed - the sum of the limits of children can
+exceed the amount of resource available to the parent.
+
+Limits are in the range [0, max] and defaults to "max", which is noop.
+
+As limits can be over-committed, all configuration combinations are
+valid and there is no reason to reject configuration changes or
+process migrations.
+
+"io.max" limits the maximum BPS and/or IOPS that a cgroup can consume
+on an IO device and is an example of this type.
+
+
+3-3. Protections
+
+A cgroup is protected to be allocated upto the configured amount of
+the resource if the usages of all its ancestors are under their
+protected levels.  Protections can be hard guarantees or best effort
+soft boundaries.  Protections can also be over-committed in which case
+only upto the amount available to the parent is protected among
+children.
+
+Protections are in the range [0, max] and defaults to 0, which is
+noop.
+
+As protections can be over-committed, all configuration combinations
+are valid and there is no reason to reject configuration changes or
+process migrations.
+
+"memory.low" implements best-effort memory protection and is an
+example of this type.
+
+
+3-4. Allocations
+
+A cgroup is exclusively allocated a certain amount of a finite
+resource.  Allocations can't be over-committed - the sum of the
+allocations of children can not exceed the amount of resource
+available to the parent.
+
+Allocations are in the range [0, max] and defaults to 0, which is no
+resource.
+
+As allocations can't be over-committed, some configuration
+combinations are invalid and should be rejected.  Also, if the
+resource is mandatory for execution of processes, process migrations
+may be rejected.
+
+"cpu.rt.max" hard-allocates realtime slices and is an example of this
+type.
+
+
+4. Interface Files
+
+4-1. Format
+
+All interface files should be in one of the following formats whenever
+possible.
+
+  New-line separated values
+  (when only one value can be written at once)
+
+	VAL0\n
+	VAL1\n
+	...
+
+  Space separated values
+  (when read-only or multiple values can be written at once)
+
+	VAL0 VAL1 ...\n
+
+  Flat keyed
+
+	KEY0 VAL0\n
+	KEY1 VAL1\n
+	...
+
+  Nested keyed
+
+	KEY0 SUB_KEY0=VAL00 SUB_KEY1=VAL01...
+	KEY1 SUB_KEY0=VAL10 SUB_KEY1=VAL11...
+	...
+
+For a writable file, the format for writing should generally match
+reading; however, controllers may allow omitting later fields or
+implement restricted shortcuts for most common use cases.
+
+For both flat and nested keyed files, only the values for a single key
+can be written at a time.  For nested keyed files, the sub key pairs
+may be specified in any order and not all pairs have to be specified.
+
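
Flat-keyed files are trivial to consume: one "KEY VAL" pair per line. A C
sketch that scans such a file and prints a single key, using the
already-introduced cgroup.events file and its "populated" key as the
example (the cgroup path is an assumption):

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          const char *path = "/sys/fs/cgroup/test-cgroup/cgroup.events";
          char key[64];
          long long val;
          FILE *f = fopen(path, "r");

          if (!f) {
                  perror(path);
                  return 1;
          }
          /* Flat keyed: one "KEY VAL" pair per line. */
          while (fscanf(f, "%63s %lld", key, &val) == 2)
                  if (!strcmp(key, "populated"))
                          printf("populated = %lld\n", val);
          fclose(f);
          return 0;
  }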
+
+4-2. Conventions
+
+- Settings for a single feature should be contained in a single file.
+
+- The root cgroup should be exempt from resource control and thus
+  shouldn't have resource control interface files.  Also,
+  informational files on the root cgroup which end up showing global
+  information available elsewhere shouldn't exist.
+
+- If a controller implements weight based resource distribution, its
+  interface file should be named "weight" and have the range [1,
+  10000] with 100 as the default.  The values are chosen to allow
+  enough and symmetric bias in both directions while keeping it
+  intuitive (the default is 100%).
+
+- If a controller implements an absolute resource guarantee and/or
+  limit, the interface files should be named "min" and "max"
+  respectively.  If a controller implements best effort resource
+  guarantee and/or limit, the interface files should be named "low"
+  and "high" respectively.
+
+  In the above four control files, the special token "max" should be
+  used to represent upward infinity for both reading and writing.
+
+- If a setting has a configurable default value and keyed specific
+  overrides, the default entry should be keyed with "default" and
+  appear as the first entry in the file.
+
+  The default value can be updated by writing either "default $VAL" or
+  "$VAL".
+
+  When writing to update a specific override, "default" can be used as
+  the value to indicate removal of the override.  Override entries
+  with "default" as the value must not appear when read.
+
+  For example, a setting which is keyed by major:minor device numbers
+  with integer values may look like the following.
+
+    # cat cgroup-example-interface-file
+    default 150
+    8:0 300
+
+  The default value can be updated by
+
+    # echo 125 > cgroup-example-interface-file
+
+  or
+
+    # echo "default 125" > cgroup-example-interface-file
+
+  An override can be set by
+
+    # echo "8:16 170" > cgroup-example-interface-file
+
+  and cleared by
+
+    # echo "8:0 default" > cgroup-example-interface-file
+    # cat cgroup-example-interface-file
+    default 125
+    8:16 170
+
+- For events which are not very high frequency, an interface file
+  "events" should be created which lists event key value pairs.
+  Whenever a notifiable event happens, file modified event should be
+  generated on the file.
+
+
+4-3. Core Interface Files
+
+All cgroup core files are prefixed with "cgroup."
+
+  cgroup.procs
+
+	A read-write new-line separated values file which exists on
+	all cgroups.
+
+	When read, it lists the PIDs of all processes which belong to
+	the cgroup one-per-line.  The PIDs are not ordered and the
+	same PID may show up more than once if the process got moved
+	to another cgroup and then back or the PID got recycled while
+	reading.
+
+	A PID can be written to migrate the process associated with
+	the PID to the cgroup.  The writer should match all of the
+	following conditions.
+
+	- Its euid is either root or must match either uid or suid of
+          the target process.
+
+	- It must have write access to the "cgroup.procs" file.
+
+	- It must have write access to the "cgroup.procs" file of the
+	  common ancestor of the source and destination cgroups.
+
+	When delegating a sub-hierarchy, write access to this file
+	should be granted along with the containing directory.
+
+  cgroup.controllers
+
+	A read-only space separated values file which exists on all
+	cgroups.
+
+	It shows space separated list of all controllers available to
+	the cgroup.  The controllers are not ordered.
+
+  cgroup.subtree_control
+
+	A read-write space separated values file which exists on all
+	cgroups.  Starts out empty.
+
+	When read, it shows space separated list of the controllers
+	which are enabled to control resource distribution from the
+	cgroup to its children.
+
+	Space separated list of controllers prefixed with '+' or '-'
+	can be written to enable or disable controllers.  A controller
+	name prefixed with '+' enables the controller and '-'
+	disables.  If a controller appears more than once on the list,
+	the last one is effective.  When multiple enable and disable
+	operations are specified, either all succeed or all fail.
+
+  cgroup.events
+
+	A read-only flat-keyed file which exists on non-root cgroups.
+	The following entries are defined.  Unless specified
+	otherwise, a value change in this file generates a file
+	modified event.
+
+	  populated
+
+		1 if the cgroup or its descendants contains any live
+		processes; otherwise, 0.
+
+
+5. Controllers
+
+5-1. CPU
+
+[NOTE: The interface for the cpu controller hasn't been merged yet]
+
+The "cpu" controllers regulates distribution of CPU cycles.  This
+controller implements weight and absolute bandwidth limit models for
+normal scheduling policy and absolute bandwidth allocation model for
+realtime scheduling policy.
+
+
+5-1-1. CPU Interface Files
+
+All time durations are in microseconds.
+
+  cpu.stat
+
+	A read-only flat-keyed file which exists on non-root cgroups.
+
+	It reports the following six stats.
+
+	  usage_usec
+	  user_usec
+	  system_usec
+	  nr_periods
+	  nr_throttled
+	  throttled_usec
+
+  cpu.weight
+
+	A read-write single value file which exists on non-root
+	cgroups.  The default is "100".
+
+	The weight in the range [1, 10000].
+
+  cpu.max
+
+	A read-write two value file which exists on non-root cgroups.
+	The default is "max 100000".
+
+	The maximum bandwidth limit.  It's in the following format.
+
+	  $MAX $PERIOD
+
+	which indicates that the group may consume upto $MAX in each
+	$PERIOD duration.  "max" for $MAX indicates no limit.  If only
+	one number is written, $MAX is updated.
+
+  cpu.rt.max
+
+  [NOTE: The semantics of this file is still under discussion and the
+   interface hasn't been merged yet]
+
+	A read-write two value file which exists on all cgroups.
+	The default is "0 100000".
+
+	The maximum realtime runtime allocation.  Over-committing
+	configurations are disallowed and process migrations are
+	rejected if not enough bandwidth is available.  It's in the
+	following format.
+
+	  $MAX $PERIOD
+
+	which indicates that the group may consume upto $MAX in each
+	$PERIOD duration.  If only one number is written, $MAX is
+	updated.
+
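
As the note above says, the cpu interface had not been merged at the time,
so the following is purely illustrative: a sketch that writes the
"$MAX $PERIOD" pair to cpu.max to cap a group at 20ms of CPU per 100ms
period (values in microseconds, cgroup path assumed):

  #include <stdio.h>

  int main(void)
  {
          FILE *f = fopen("/sys/fs/cgroup/test-cgroup/cpu.max", "w");

          if (!f) {
                  perror("cpu.max");
                  return 1;
          }
          /* "$MAX $PERIOD" in microseconds: 20ms of CPU per 100ms period. */
          fprintf(f, "20000 100000\n");
          if (fclose(f) != 0) {
                  perror("cpu.max");
                  return 1;
          }
          return 0;
  }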
+
+5-2. Memory
+
+The "memory" controller regulates distribution of memory.  Memory is
+stateful and implements both limit and protection models.  Due to the
+intertwining between memory usage and reclaim pressure and the
+stateful nature of memory, the distribution model is relatively
+complex.
+
+While not completely water-tight, all major memory usages by a given
+cgroup are tracked so that the total memory consumption can be
+accounted and controlled to a reasonable extent.  Currently, the
+following types of memory usages are tracked.
+
+- Userland memory - page cache and anonymous memory.
+
+- Kernel data structures such as dentries and inodes.
+
+- TCP socket buffers.
+
+The above list may expand in the future for better coverage.
+
+
+5-2-1. Memory Interface Files
+
+All memory amounts are in bytes.  If a value which is not aligned to
+PAGE_SIZE is written, the value may be rounded up to the closest
+PAGE_SIZE multiple when read back.
+
+  memory.current
+
+	A read-only single value file which exists on non-root
+	cgroups.
+
+	The total amount of memory currently being used by the cgroup
+	and its descendants.
+
+  memory.low
+
+	A read-write single value file which exists on non-root
+	cgroups.  The default is "0".
+
+	Best-effort memory protection.  If the memory usages of a
+	cgroup and all its ancestors are below their low boundaries,
+	the cgroup's memory won't be reclaimed unless memory can be
+	reclaimed from unprotected cgroups.
+
+	Putting more memory than generally available under this
+	protection is discouraged.
+
+  memory.high
+
+	A read-write single value file which exists on non-root
+	cgroups.  The default is "max".
+
+	Memory usage throttle limit.  This is the main mechanism to
+	control memory usage of a cgroup.  If a cgroup's usage goes
+	over the high boundary, the processes of the cgroup are
+	throttled and put under heavy reclaim pressure.
+
+	Going over the high limit never invokes the OOM killer and
+	under extreme conditions the limit may be breached.
+
+  memory.max
+
+	A read-write single value file which exists on non-root
+	cgroups.  The default is "max".
+
+	Memory usage hard limit.  This is the final protection
+	mechanism.  If a cgroup's memory usage reaches this limit and
+	can't be reduced, the OOM killer is invoked in the cgroup.
+	Under certain circumstances, the usage may go over the limit
+	temporarily.
+
+	This is the ultimate protection mechanism.  As long as the
+	high limit is used and monitored properly, this limit's
+	utility is limited to providing the final safety net.
+
+  memory.events
+
+	A read-only flat-keyed file which exists on non-root cgroups.
+	The following entries are defined.  Unless specified
+	otherwise, a value change in this file generates a file
+	modified event.
+
+	  low
+
+		The number of times the cgroup is reclaimed due to
+		high memory pressure even though its usage is under
+		the low boundary.  This usually indicates that the low
+		boundary is over-committed.
+
+	  high
+
+		The number of times processes of the cgroup are
+		throttled and routed to perform direct memory reclaim
+		because the high memory boundary was exceeded.  For a
+		cgroup whose memory usage is capped by the high limit
+		rather than global memory pressure, this event's
+		occurrences are expected.
+
+	  max
+
+		The number of times the cgroup's memory usage was
+		about to go over the max boundary.  If direct reclaim
+		fails to bring it down, the OOM killer is invoked.
+
+	  oom
+
+		The number of times the OOM killer has been invoked in
+		the cgroup.  This may not exactly match the number of
+		processes killed but should generally be close.
+
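
Putting the memory files together, a manager would typically set memory.high
as the working throttle limit and then watch memory.events for the high/oom
counters. A sketch with an assumed cgroup path and an arbitrary 512 MiB
limit:

  #include <stdio.h>

  int main(void)
  {
          const char *base = "/sys/fs/cgroup/test-cgroup";
          char path[256], line[128];
          FILE *f;

          /* Throttle limit: memory amounts are plain byte values. */
          snprintf(path, sizeof(path), "%s/memory.high", base);
          f = fopen(path, "w");
          if (!f) {
                  perror("memory.high");
                  return 1;
          }
          fprintf(f, "%lld\n", 512LL * 1024 * 1024);
          if (fclose(f) != 0) {
                  perror("memory.high");
                  return 1;
          }

          /* Dump the low/high/max/oom event counters. */
          snprintf(path, sizeof(path), "%s/memory.events", base);
          f = fopen(path, "r");
          if (!f) {
                  perror("memory.events");
                  return 1;
          }
          while (fgets(line, sizeof(line), f))
                  fputs(line, stdout);
          fclose(f);
          return 0;
  }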
+
+5-2-2. General Usage
+
+"memory.high" is the main mechanism to control memory usage.
+Over-committing on high limit (sum of high limits > available memory)
+and letting global memory pressure to distribute memory according to
+usage is a viable strategy.
+
+Because breach of the high limit doesn't trigger the OOM killer but
+throttles the offending cgroup, a management agent has ample
+opportunities to monitor and take appropriate actions such as granting
+more memory or terminating the workload.
+
+Determining whether a cgroup has enough memory is not trivial as
+memory usage doesn't indicate whether the workload can benefit from
+more memory.  For example, a workload which writes data received from
+network to a file can use all available memory but can also operate as
+performant with a small amount of memory.  A measure of memory
+pressure - how much the workload is being impacted due to lack of
+memory - is necessary to determine whether a workload needs more
+memory; unfortunately, memory pressure monitoring mechanism isn't
+implemented yet.
+
+
+5-2-3. Memory Ownership
+
+A memory area is charged to the cgroup which instantiated it and stays
+charged to the cgroup until the area is released.  Migrating a process
+to a different cgroup doesn't move the memory usages that it
+instantiated while in the previous cgroup to the new cgroup.
+
+A memory area may be used by processes belonging to different cgroups.
+To which cgroup the area will be charged is in-deterministic; however,
+over time, the memory area is likely to end up in a cgroup which has
+enough memory allowance to avoid high reclaim pressure.
+
+If a cgroup sweeps a considerable amount of memory which is expected
+to be accessed repeatedly by other cgroups, it may make sense to use
+POSIX_FADV_DONTNEED to relinquish the ownership of memory areas
+belonging to the affected files to ensure correct memory ownership.
+
+
+5-3. IO
+
+The "io" controller regulates the distribution of IO resources.  This
+controller implements both weight based and absolute bandwidth or IOPS
+limit distribution; however, weight based distribution is available
+only if cfq-iosched is in use and neither scheme is available for
+blk-mq devices.
+
+
+5-3-1. IO Interface Files
+
+  io.stat
+
+	A read-only nested-keyed file which exists on non-root
+	cgroups.
+
+	Lines are keyed by $MAJ:$MIN device numbers and not ordered.
+	The following nested keys are defined.
+
+	  rbytes	Bytes read
+	  wbytes	Bytes written
+	  rios		Number of read IOs
+	  wios		Number of write IOs
+
+	An example read output follows.
+
+	  8:16 rbytes=1459200 wbytes=314773504 rios=192 wios=353
+	  8:0 rbytes=90430464 wbytes=299008000 rios=8950 wios=1252
+
+  io.weight
+
+	A read-write flat-keyed file which exists on non-root cgroups.
+	The default is "default 100".
+
+	The first line is the default weight applied to devices
+	without specific override.  The rest are overrides keyed by
+	$MAJ:$MIN device numbers and not ordered.  The weights are in
+	the range [1, 10000] and specifies the relative amount IO time
+	the cgroup can use in relation to its siblings.
+
+	The default weight can be updated by writing either "default
+	$WEIGHT" or simply "$WEIGHT".  Overrides can be set by writing
+	"$MAJ:$MIN $WEIGHT" and unset by writing "$MAJ:$MIN default".
+
+	An example read output follows.
+
+	  default 100
+	  8:16 200
+	  8:0 50
+
+  io.max
+
+	A read-write nested-keyed file which exists on non-root
+	cgroups.
+
+	BPS and IOPS based IO limit.  Lines are keyed by $MAJ:$MIN
+	device numbers and not ordered.  The following nested keys are
+	defined.
+
+	  rbps		Max read bytes per second
+	  wbps		Max write bytes per second
+	  riops		Max read IO operations per second
+	  wiops		Max write IO operations per second
+
+	When writing, any number of nested key-value pairs can be
+	specified in any order.  "max" can be specified as the value
+	to remove a specific limit.  If the same key is specified
+	multiple times, the outcome is undefined.
+
+	BPS and IOPS are measured in each IO direction and IOs are
+	delayed if limit is reached.  Temporary bursts are allowed.
+
+	Setting read limit at 2M BPS and write at 120 IOPS for 8:16.
+
+	  echo "8:16 rbps=2097152 wiops=120" > io.max
+
+	Reading returns the following.
+
+	  8:16 rbps=2097152 wbps=max riops=max wiops=120
+
+	Write IOPS limit can be removed by writing the following.
+
+	  echo "8:16 wiops=max" > io.max
+
+	Reading now returns the following.
+
+	  8:16 rbps=2097152 wbps=max riops=max wiops=max
+
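
The nested-keyed format used by io.stat and io.max is also easy to consume
from a program: each line is a $MAJ:$MIN key followed by KEY=VAL pairs. A
sketch that reports per-device read/write byte counts from io.stat, with
the cgroup path assumed:

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          FILE *f = fopen("/sys/fs/cgroup/test-cgroup/io.stat", "r");
          char line[256];

          if (!f) {
                  perror("io.stat");
                  return 1;
          }
          while (fgets(line, sizeof(line), f)) {
                  char dev[32];
                  unsigned long long rbytes = 0, wbytes = 0;
                  char *tok = strtok(line, " \n");

                  if (!tok)
                          continue;
                  snprintf(dev, sizeof(dev), "%s", tok);
                  /* Remaining tokens are KEY=VAL pairs in any order. */
                  while ((tok = strtok(NULL, " \n"))) {
                          sscanf(tok, "rbytes=%llu", &rbytes);
                          sscanf(tok, "wbytes=%llu", &wbytes);
                  }
                  printf("%s read %llu bytes, wrote %llu bytes\n",
                         dev, rbytes, wbytes);
          }
          fclose(f);
          return 0;
  }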
+
+5-3-2. Writeback
+
+Page cache is dirtied through buffered writes and shared mmaps and
+written asynchronously to the backing filesystem by the writeback
+mechanism.  Writeback sits between the memory and IO domains and
+regulates the proportion of dirty memory by balancing dirtying and
+write IOs.
+
+The io controller, in conjunction with the memory controller,
+implements control of page cache writeback IOs.  The memory controller
+defines the memory domain that dirty memory ratio is calculated and
+maintained for and the io controller defines the io domain which
+writes out dirty pages for the memory domain.  Both system-wide and
+per-cgroup dirty memory states are examined and the more restrictive
+of the two is enforced.
+
+cgroup writeback requires explicit support from the underlying
+filesystem.  Currently, cgroup writeback is implemented on ext2, ext4
+and btrfs.  On other filesystems, all writeback IOs are attributed to
+the root cgroup.
+
+There are inherent differences in memory and writeback management
+which affects how cgroup ownership is tracked.  Memory is tracked per
+page while writeback per inode.  For the purpose of writeback, an
+inode is assigned to a cgroup and all IO requests to write dirty pages
+from the inode are attributed to that cgroup.
+
+As cgroup ownership for memory is tracked per page, there can be pages
+which are associated with different cgroups than the one the inode is
+associated with.  These are called foreign pages.  The writeback
+constantly keeps track of foreign pages and, if a particular foreign
+cgroup becomes the majority over a certain period of time, switches
+the ownership of the inode to that cgroup.
+
+While this model is enough for most use cases where a given inode is
+mostly dirtied by a single cgroup even when the main writing cgroup
+changes over time, use cases where multiple cgroups write to a single
+inode simultaneously are not supported well.  In such circumstances, a
+significant portion of IOs are likely to be attributed incorrectly.
+As memory controller assigns page ownership on the first use and
+doesn't update it until the page is released, even if writeback
+strictly follows page ownership, multiple cgroups dirtying overlapping
+areas wouldn't work as expected.  It's recommended to avoid such usage
+patterns.
+
+The sysctl knobs which affect writeback behavior are applied to cgroup
+writeback as follows.
+
+  vm.dirty_background_ratio
+  vm.dirty_ratio
+
+	These ratios apply the same to cgroup writeback with the
+	amount of available memory capped by limits imposed by the
+	memory controller and system-wide clean memory.
+
+  vm.dirty_background_bytes
+  vm.dirty_bytes
+
+	For cgroup writeback, this is calculated into ratio against
+	total available memory and applied the same way as
+	vm.dirty[_background]_ratio.
+
+
+P. Information on Kernel Programming
+
+This section contains kernel programming information in the areas
+where interacting with cgroup is necessary.  cgroup core and
+controllers are not covered.
+
+
+P-1. Filesystem Support for Writeback
+
+A filesystem can support cgroup writeback by updating
+address_space_operations->writepage[s]() to annotate bio's using the
+following two functions.
+
+  wbc_init_bio(@wbc, @bio)
+
+	Should be called for each bio carrying writeback data and
+	associates the bio with the inode's owner cgroup.  Can be
+	called anytime between bio allocation and submission.
+
+  wbc_account_io(@wbc, @page, @bytes)
+
+	Should be called for each data segment being written out.
+	While this function doesn't care exactly when it's called
+	during the writeback session, it's the easiest and most
+	natural to call it as data segments are added to a bio.
+
+With writeback bio's annotated, cgroup support can be enabled per
+super_block by setting SB_I_CGROUPWB in ->s_iflags.  This allows for
+selective disabling of cgroup writeback support which is helpful when
+certain filesystem features, e.g. journaled data mode, are
+incompatible.
+
+wbc_init_bio() binds the specified bio to its cgroup.  Depending on
+the configuration, the bio may be executed at a lower priority and if
+the writeback session is holding shared resources, e.g. a journal
+entry, may lead to priority inversion.  There is no one easy solution
+for the problem.  Filesystems can try to work around specific problem
+cases by skipping wbc_init_bio() or using bio_associate_blkcg()
+directly.
+
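
For orientation only, the following schematic shows where the two helpers
named above would sit in a hypothetical filesystem's writepage path. It is
not taken from any real filesystem, and bio construction and submission are
reduced to comments:

  #include <linux/bio.h>
  #include <linux/fs.h>
  #include <linux/pagemap.h>
  #include <linux/writeback.h>

  /* Schematic only: bio setup and submission are elided. */
  static int example_writepage(struct page *page, struct writeback_control *wbc)
  {
          struct bio *bio = NULL;

          /* ... allocate 'bio' and point it at the right block device ... */

          /* Associate the bio with the inode's owner cgroup.  Legal any
           * time between bio allocation and submission. */
          wbc_init_bio(wbc, bio);

          /* ... add 'page' to the bio ... */

          /* Account the data segment as it is added to the bio. */
          wbc_account_io(wbc, page, PAGE_SIZE);

          /* ... submit the bio ... */
          return 0;
  }

Per-super_block opt-in is then a single assignment in the filesystem's
fill_super() path, e.g. sb->s_iflags |= SB_I_CGROUPWB;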
+
+D. Deprecated v1 Core Features
+
+- Multiple hierarchies including named ones are not supported.
+
+- All mount options and remounting are not supported.
+
+- The "tasks" file is removed and "cgroup.procs" is not sorted.
+
+- "cgroup.clone_children" is removed.
+
+- /proc/cgroups is meaningless for v2.  Use "cgroup.controllers" file
+  at the root instead.
+
+
+R. Issues with v1 and Rationales for v2
+
+R-1. Multiple Hierarchies
+
+cgroup v1 allowed an arbitrary number of hierarchies and each
+hierarchy could host any number of controllers.  While this seemed to
+provide a high level of flexibility, it wasn't useful in practice.
+
+For example, as there is only one instance of each controller, utility
+type controllers such as freezer which can be useful in all
+hierarchies could only be used in one.  The issue is exacerbated by
+the fact that controllers couldn't be moved to another hierarchy once
+hierarchies were populated.  Another issue was that all controllers
+bound to a hierarchy were forced to have exactly the same view of the
+hierarchy.  It wasn't possible to vary the granularity depending on
+the specific controller.
+
+In practice, these issues heavily limited which controllers could be
+put on the same hierarchy and most configurations resorted to putting
+each controller on its own hierarchy.  Only closely related ones, such
+as the cpu and cpuacct controllers, made sense to be put on the same
+hierarchy.  This often meant that userland ended up managing multiple
+similar hierarchies repeating the same steps on each hierarchy
+whenever a hierarchy management operation was necessary.
+
+Furthermore, support for multiple hierarchies came at a steep cost.
+It greatly complicated cgroup core implementation but more importantly
+the support for multiple hierarchies restricted how cgroup could be
+used in general and what controllers was able to do.
+
+There was no limit on how many hierarchies there might be, which meant
+that a thread's cgroup membership couldn't be described in finite
+length.  The key might contain any number of entries and was unlimited
+in length, which made it highly awkward to manipulate and led to
+addition of controllers which existed only to identify membership,
+which in turn exacerbated the original problem of proliferating number
+of hierarchies.
+
+Also, as a controller couldn't have any expectation regarding the
+topologies of hierarchies other controllers might be on, each
+controller had to assume that all other controllers were attached to
+completely orthogonal hierarchies.  This made it impossible, or at
+least very cumbersome, for controllers to cooperate with each other.
+
+In most use cases, putting controllers on hierarchies which are
+completely orthogonal to each other isn't necessary.  What usually is
+called for is the ability to have differing levels of granularity
+depending on the specific controller.  In other words, hierarchy may
+be collapsed from leaf towards root when viewed from specific
+controllers.  For example, a given configuration might not care about
+how memory is distributed beyond a certain level while still wanting
+to control how CPU cycles are distributed.
+
+
+R-2. Thread Granularity
+
+cgroup v1 allowed threads of a process to belong to different cgroups.
+This didn't make sense for some controllers and those controllers
+ended up implementing different ways to ignore such situations but
+much more importantly it blurred the line between API exposed to
+individual applications and system management interface.
+
+Generally, in-process knowledge is available only to the process
+itself; thus, unlike service-level organization of processes,
+categorizing threads of a process requires active participation from
+the application which owns the target process.
+
+cgroup v1 had an ambiguously defined delegation model which got abused
+in combination with thread granularity.  cgroups were delegated to
+individual applications so that they could create and manage their own
+sub-hierarchies and control resource distributions along them.  This
+effectively raised cgroup to the status of a syscall-like API exposed
+to lay programs.
+
+First of all, cgroup has a fundamentally inadequate interface to be
+exposed this way.  For a process to access its own knobs, it has to
+extract the path on the target hierarchy from /proc/self/cgroup,
+construct the path by appending the name of the knob to the path, open
+and then read and/or write to it.  This is not only extremely clunky
+and unusual but also inherently racy.  There is no conventional way to
+define a transaction across the required steps and nothing can guarantee
+that the process would actually be operating on its own sub-hierarchy.
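+
+As an illustration of the sequence above, a minimal shell sketch (the
+mount point /sys/fs/cgroup and the memory.high knob are assumptions
+made only for the example; the unified-hierarchy "0::" entry is used
+for brevity):
+
+  # extract the path on the target hierarchy from /proc/self/cgroup
+  cgpath=$(awk -F: '$1 == "0" { print $3 }' /proc/self/cgroup)
+  # append the knob name to build the full path, then open and read it;
+  # nothing prevents the task from being migrated between these steps
+  cat "/sys/fs/cgroup${cgpath}/memory.high"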
+
+cgroup controllers implemented a number of knobs which would never be
+accepted as public APIs because they were just adding control knobs to
+a system-management pseudo filesystem.  cgroup ended up with interface
+knobs which were not properly abstracted or refined and directly
+revealed kernel internal details.  These knobs got exposed to
+individual applications through the ill-defined delegation mechanism
+effectively abusing cgroup as a shortcut to implementing public APIs
+without going through the required scrutiny.
+
+This was painful for both userland and kernel.  Userland ended up with
+misbehaving and poorly abstracted interfaces, and the kernel
+inadvertently exposed and got locked into those constructs.
+
+
+R-3. Competition Between Inner Nodes and Threads
+
+cgroup v1 allowed threads to be in any cgroups which created an
+interesting problem where threads belonging to a parent cgroup and its
+children cgroups competed for resources.  This was nasty as two
+different types of entities competed and there was no obvious way to
+settle it.  Different controllers did different things.
+
+The cpu controller considered threads and cgroups as equivalents and
+mapped nice levels to cgroup weights.  This worked for some cases but
+fell flat when children wanted to be allocated specific ratios of CPU
+cycles and the number of internal threads fluctuated - the ratios
+constantly changed as the number of competing entities fluctuated.
+There also were other issues.  The mapping from nice level to weight
+wasn't obvious or universal, and there were various other knobs which
+simply weren't available for threads.
+
+The io controller implicitly created a hidden leaf node for each
+cgroup to host the threads.  The hidden leaf had its own copies of all
+the knobs with "leaf_" prefixed.  While this allowed equivalent
+control over internal threads, it came with serious drawbacks.  It
+always added an extra layer of nesting which wouldn't be necessary
+otherwise, made the interface messy and significantly complicated the
+implementation.
+
+The memory controller didn't have a way to control what happened
+between internal tasks and child cgroups and the behavior was not
+clearly defined.  There were attempts to add ad-hoc behaviors and
+knobs to tailor the behavior to specific workloads which would have
+led to problems extremely difficult to resolve in the long term.
+
+Multiple controllers struggled with internal tasks and came up with
+different ways to deal with it; unfortunately, all the approaches were
+severely flawed and, furthermore, the widely different behaviors
+made cgroup as a whole highly inconsistent.
+
+This clearly is a problem which needs to be addressed from cgroup core
+in a uniform way.
+
+
+R-4. Other Interface Issues
+
+cgroup v1 grew without oversight and developed a large number of
+idiosyncrasies and inconsistencies.  One issue on the cgroup core side
+was how an empty cgroup was notified - a userland helper binary was
+forked and executed for each event.  The event delivery wasn't
+recursive or delegatable.  The limitations of the mechanism also led
+to an in-kernel event delivery filtering mechanism, further
+complicating the interface.
+
+Controller interfaces were problematic too.  An extreme example is
+controllers completely ignoring hierarchical organization and treating
+all cgroups as if they were all located directly under the root
+cgroup.  Some controllers exposed a large amount of inconsistent
+implementation details to userland.
+
+There also was no consistency across controllers.  When a new cgroup
+was created, some controllers defaulted to not imposing extra
+restrictions while others disallowed any resource usage until
+explicitly configured.  Configuration knobs for the same type of
+control used widely differing naming schemes and formats.  Statistics
+and information knobs were named arbitrarily and used different
+formats and units even in the same controller.
+
+cgroup v2 establishes common conventions where appropriate and updates
+controllers so that they expose minimal and consistent interfaces.
+
+
+R-5. Controller Issues and Remedies
+
+R-5-1. Memory
+
+The original lower boundary, the soft limit, is defined as a limit
+that is per default unset.  As a result, the set of cgroups that
+global reclaim prefers is opt-in, rather than opt-out.  The costs for
+optimizing these mostly negative lookups are so high that the
+implementation, despite its enormous size, does not even provide the
+basic desirable behavior.  First off, the soft limit has no
+hierarchical meaning.  All configured groups are organized in a global
+rbtree and treated like equal peers, regardless of where they are located
+in the hierarchy.  This makes subtree delegation impossible.  Second,
+the soft limit reclaim pass is so aggressive that it not just
+introduces high allocation latencies into the system, but also impacts
+system performance due to overreclaim, to the point where the feature
+becomes self-defeating.
+
+The memory.low boundary on the other hand is a top-down allocated
+reserve.  A cgroup enjoys reclaim protection when it and all its
+ancestors are below their low boundaries, which makes delegation of
+subtrees possible.  Secondly, new cgroups have no reserve per default
+and in the common case most cgroups are eligible for the preferred
+reclaim pass.  This allows the new low boundary to be efficiently
+implemented with just a minor addition to the generic reclaim code,
+without the need for out-of-band data structures and reclaim passes.
+Because the generic reclaim code considers all cgroups except for the
+ones running low in the preferred first reclaim pass, overreclaim of
+individual groups is eliminated as well, resulting in much better
+overall workload performance.
+
+The original high boundary, the hard limit, is defined as a strict
+limit that cannot budge, even if the OOM killer has to be called.
+But this generally goes against the goal of making the most out of the
+available memory.  The memory consumption of workloads varies during
+runtime, and that requires users to overcommit.  But doing that with a
+strict upper limit requires either a fairly accurate prediction of the
+working set size or adding slack to the limit.  Since working set size
+estimation is hard and error prone, and getting it wrong results in
+OOM kills, most users tend to err on the side of a looser limit and
+end up wasting precious resources.
+
+The memory.high boundary on the other hand can be set much more
+conservatively.  When hit, it throttles allocations by forcing them
+into direct reclaim to work off the excess, but it never invokes the
+OOM killer.  As a result, a high boundary that is chosen too
+aggressively will not terminate the processes, but instead it will
+lead to gradual performance degradation.  The user can monitor this
+and make corrections until the minimal memory footprint that still
+gives acceptable performance is found.
+
+In extreme cases, with many concurrent allocations and a complete
+breakdown of reclaim progress within the group, the high boundary can
+be exceeded.  But even then it's mostly better to satisfy the
+allocation from the slack available in other groups or the rest of the
+system than killing the group.  Otherwise, memory.max is there to
+limit this type of spillover and ultimately contain buggy or even
+malicious applications.
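+
+As a hedged illustration of how these three knobs fit together (the
+mount point and the cgroup name "workload" are assumptions, and the
+values are arbitrary):
+
+  # protect roughly 512M from reclaim, start throttling above 4G, and
+  # leave the hard limit at "max" so memory.high does the day-to-day work
+  echo 512M > /sys/fs/cgroup/workload/memory.low
+  echo 4G   > /sys/fs/cgroup/workload/memory.high
+  echo max  > /sys/fs/cgroup/workload/memory.max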

+ 0 - 647
Documentation/cgroups/unified-hierarchy.txt

@@ -1,647 +0,0 @@
-
-Cgroup unified hierarchy
-
-April, 2014		Tejun Heo <tj@kernel.org>
-
-This document describes the changes made by unified hierarchy and
-their rationales.  It will eventually be merged into the main cgroup
-documentation.
-
-CONTENTS
-
-1. Background
-2. Basic Operation
-  2-1. Mounting
-  2-2. cgroup.subtree_control
-  2-3. cgroup.controllers
-3. Structural Constraints
-  3-1. Top-down
-  3-2. No internal tasks
-4. Delegation
-  4-1. Model of delegation
-  4-2. Common ancestor rule
-5. Other Changes
-  5-1. [Un]populated Notification
-  5-2. Other Core Changes
-  5-3. Controller File Conventions
-    5-3-1. Format
-    5-3-2. Control Knobs
-  5-4. Per-Controller Changes
-    5-4-1. io
-    5-4-2. cpuset
-    5-4-3. memory
-6. Planned Changes
-  6-1. CAP for resource control
-
-
-1. Background
-
-cgroup allows an arbitrary number of hierarchies and each hierarchy
-can host any number of controllers.  While this seems to provide a
-high level of flexibility, it isn't quite useful in practice.
-
-For example, as there is only one instance of each controller, utility
-type controllers such as freezer which can be useful in all
-hierarchies can only be used in one.  The issue is exacerbated by the
-fact that controllers can't be moved around once hierarchies are
-populated.  Another issue is that all controllers bound to a hierarchy
-are forced to have exactly the same view of the hierarchy.  It isn't
-possible to vary the granularity depending on the specific controller.
-
-In practice, these issues heavily limit which controllers can be put
-on the same hierarchy and most configurations resort to putting each
-controller on its own hierarchy.  Only closely related ones, such as
-the cpu and cpuacct controllers, make sense to put on the same
-hierarchy.  This often means that userland ends up managing multiple
-similar hierarchies repeating the same steps on each hierarchy
-whenever a hierarchy management operation is necessary.
-
-Unfortunately, support for multiple hierarchies comes at a steep cost.
-Internal implementation in cgroup core proper is dazzlingly
-complicated but more importantly the support for multiple hierarchies
-restricts how cgroup is used in general and what controllers can do.
-
-There's no limit on how many hierarchies there may be, which means
-that a task's cgroup membership can't be described in finite length.
-The key may contain any varying number of entries and is unlimited in
-length, which makes it highly awkward to handle and leads to addition
-of controllers which exist only to identify membership, which in turn
-exacerbates the original problem.
-
-Also, as a controller can't have any expectation regarding what shape
-of hierarchies other controllers would be on, each controller has to
-assume that all other controllers are operating on completely
-orthogonal hierarchies.  This makes it impossible, or at least very
-cumbersome, for controllers to cooperate with each other.
-
-In most use cases, putting controllers on hierarchies which are
-completely orthogonal to each other isn't necessary.  What usually is
-called for is the ability to have differing levels of granularity
-depending on the specific controller.  In other words, hierarchy may
-be collapsed from leaf towards root when viewed from specific
-controllers.  For example, a given configuration might not care about
-how memory is distributed beyond a certain level while still wanting
-to control how CPU cycles are distributed.
-
-Unified hierarchy is the next version of cgroup interface.  It aims to
-address the aforementioned issues by having more structure while
-retaining enough flexibility for most use cases.  Various other
-general and controller-specific interface issues are also addressed in
-the process.
-
-
-2. Basic Operation
-
-2-1. Mounting
-
-Currently, unified hierarchy can be mounted with the following mount
-command.  Note that this is still under development and scheduled to
-change soon.
-
- mount -t cgroup -o __DEVEL__sane_behavior cgroup $MOUNT_POINT
-
-All controllers which support the unified hierarchy and are not bound
-to other hierarchies are automatically bound to unified hierarchy and
-show up at the root of it.  Controllers which are enabled only in the
-root of unified hierarchy can be bound to other hierarchies.  This
-allows mixing unified hierarchy with the traditional multiple
-hierarchies in a fully backward compatible way.
-
-A controller can be moved across hierarchies only after the controller
-is no longer referenced in its current hierarchy.  Because per-cgroup
-controller states are destroyed asynchronously and controllers may
-have lingering references, a controller may not show up immediately on
-the unified hierarchy after the final umount of the previous
-hierarchy.  Similarly, a controller should be fully disabled to be
-moved out of the unified hierarchy and it may take some time for the
-disabled controller to become available for other hierarchies;
-furthermore, due to dependencies among controllers, other controllers
-may need to be disabled too.
-
-While useful for development and manual configurations, dynamically
-moving controllers between the unified and other hierarchies is
-strongly discouraged for production use.  It is recommended to decide
-the hierarchies and controller associations before starting using the
-controllers.
-
-
-2-2. cgroup.subtree_control
-
-All cgroups on unified hierarchy have a "cgroup.subtree_control" file
-which governs which controllers are enabled on the children of the
-cgroup.  Let's assume a hierarchy like the following.
-
-  root - A - B - C
-               \ D
-
-root's "cgroup.subtree_control" file determines which controllers are
-enabled on A.  A's on B.  B's on C and D.  This coincides with the
-fact that controllers on the immediate sub-level are used to
-distribute the resources of the parent.  In fact, it's natural to
-assume that resource control knobs of a child belong to its parent.
-Enabling a controller in a "cgroup.subtree_control" file declares that
-distribution of the respective resources of the cgroup will be
-controlled.  Note that this means that controller enable states are
-shared among siblings.
-
-When read, the file contains a space-separated list of currently
-enabled controllers.  A write to the file should contain a
-space-separated list of controllers with '+' or '-' prefixed (without
-the quotes).  Controllers prefixed with '+' are enabled and '-'
-disabled.  If a controller is listed multiple times, the last entry
-wins.  The specific operations are executed atomically - either all
-succeed or fail.
-
-
-2-3. cgroup.controllers
-
-Read-only "cgroup.controllers" file contains a space-separated list of
-controllers which can be enabled in the cgroup's
-"cgroup.subtree_control" file.
-
-In the root cgroup, this lists controllers which are not bound to
-other hierarchies and the content changes as controllers are bound to
-and unbound from other hierarchies.
-
-In non-root cgroups, the content of this file equals that of the
-parent's "cgroup.subtree_control" file as only controllers enabled
-from the parent can be used in its children.
-
-
-3. Structural Constraints
-
-3-1. Top-down
-
-As it doesn't make sense to nest control of an uncontrolled resource,
-all non-root "cgroup.subtree_control" files can only contain
-controllers which are enabled in the parent's "cgroup.subtree_control"
-file.  A controller can be enabled only if the parent has the
-controller enabled and a controller can't be disabled if one or more
-children have it enabled.
-
-
-3-2. No internal tasks
-
-One long-standing issue that cgroup faces is the competition between
-tasks belonging to the parent cgroup and its children cgroups.  This
-is inherently nasty as two different types of entities compete and
-there is no agreed-upon obvious way to handle it.  Different
-controllers are doing different things.
-
-The cpu controller considers tasks and cgroups as equivalents and maps
-nice levels to cgroup weights.  This works for some cases but falls
-flat when children should be allocated specific ratios of CPU cycles
-and the number of internal tasks fluctuates - the ratios constantly
-change as the number of competing entities fluctuates.  There also are
-other issues.  The mapping from nice level to weight isn't obvious or
-universal, and there are various other knobs which simply aren't
-available for tasks.
-
-The io controller implicitly creates a hidden leaf node for each
-cgroup to host the tasks.  The hidden leaf has its own copies of all
-the knobs with "leaf_" prefixed.  While this allows equivalent control
-over internal tasks, it's with serious drawbacks.  It always adds an
-extra layer of nesting which may not be necessary, makes the interface
-messy and significantly complicates the implementation.
-
-The memory controller currently doesn't have a way to control what
-happens between internal tasks and child cgroups and the behavior is
-not clearly defined.  There have been attempts to add ad-hoc behaviors
-and knobs to tailor the behavior to specific workloads.  Continuing
-this direction will lead to problems which will be extremely difficult
-to resolve in the long term.
-
-Multiple controllers struggle with internal tasks and came up with
-different ways to deal with it; unfortunately, all the approaches in
-use now are severely flawed and, furthermore, the widely different
-behaviors make cgroup as whole highly inconsistent.
-
-It is clear that this is something which needs to be addressed from
-cgroup core proper in a uniform way so that controllers don't need to
-worry about it and cgroup as a whole shows a consistent and logical
-behavior.  To achieve that, unified hierarchy enforces the following
-structural constraint:
-
- Except for the root, only cgroups which don't contain any task may
- have controllers enabled in their "cgroup.subtree_control" files.
-
-Combined with other properties, this guarantees that, when a
-controller is looking at the part of the hierarchy which has it
-enabled, tasks are always only on the leaves.  This rules out
-situations where child cgroups compete against internal tasks of the
-parent.
-
-There are two things to note.  Firstly, the root cgroup is exempt from
-the restriction.  Root contains tasks and anonymous resource
-consumption which can't be associated with any other cgroup and
-requires special treatment from most controllers.  How resource
-consumption in the root cgroup is governed is up to each controller.
-
-Secondly, the restriction doesn't take effect if there is no enabled
-controller in the cgroup's "cgroup.subtree_control" file.  This is
-important as otherwise it wouldn't be possible to create children of a
-populated cgroup.  To control resource distribution of a cgroup, the
-cgroup must create children and transfer all its tasks to the children
-before enabling controllers in its "cgroup.subtree_control" file.
-
-
-4. Delegation
-
-4-1. Model of delegation
-
-A cgroup can be delegated to a less privileged user by granting write
-access of the directory and its "cgroup.procs" file to the user.  Note
-that the resource control knobs in a given directory concern the
-resources of the parent and thus must not be delegated along with the
-directory.
-
-Once delegated, the user can build sub-hierarchy under the directory,
-organize processes as it sees fit and further distribute the resources
-it got from the parent.  The limits and other settings of all resource
-controllers are hierarchical and regardless of what happens in the
-delegated sub-hierarchy, nothing can escape the resource restrictions
-imposed by the parent.
-
-Currently, cgroup doesn't impose any restrictions on the number of
-cgroups in or nesting depth of a delegated sub-hierarchy; however,
-this may in the future be limited explicitly.
-
-
-4-2. Common ancestor rule
-
-On the unified hierarchy, to write to a "cgroup.procs" file, in
-addition to the usual write permission to the file and uid match, the
-writer must also have write access to the "cgroup.procs" file of the
-common ancestor of the source and destination cgroups.  This prevents
-delegatees from smuggling processes across disjoint sub-hierarchies.
-
-Let's say cgroups C0 and C1 have been delegated to user U0 who created
-C00, C01 under C0 and C10 under C1 as follows.
-
- ~~~~~~~~~~~~~ - C0 - C00
- ~ cgroup    ~      \ C01
- ~ hierarchy ~
- ~~~~~~~~~~~~~ - C1 - C10
-
-C0 and C1 are separate entities in terms of resource distribution
-regardless of their relative positions in the hierarchy.  The
-resources the processes under C0 are entitled to are controlled by
-C0's ancestors and may be completely different from C1.  It's clear
-that the intention of delegating C0 to U0 is allowing U0 to organize
-the processes under C0 and further control the distribution of C0's
-resources.
-
-On traditional hierarchies, if a task has write access to "tasks" or
-"cgroup.procs" file of a cgroup and its uid agrees with the target, it
-can move the target to the cgroup.  In the above example, U0 will not
-only be able to move processes in each sub-hierarchy but also across
-the two sub-hierarchies, effectively allowing it to violate the
-organizational and resource restrictions implied by the hierarchical
-structure above C0 and C1.
-
-On the unified hierarchy, let's say U0 wants to write the pid of a
-process which has a matching uid and is currently in C10 into
-"C00/cgroup.procs".  U0 obviously has write access to the file and
-migration permission on the process; however, the common ancestor of
-the source cgroup C10 and the destination cgroup C00 is above the
-points of delegation and U0 would not have write access to its
-"cgroup.procs" and thus be denied with -EACCES.
-
-
-5. Other Changes
-
-5-1. [Un]populated Notification
-
-cgroup users often need a way to determine when a cgroup's
-subhierarchy becomes empty so that it can be cleaned up.  cgroup
-currently provides release_agent for it; unfortunately, this mechanism
-is riddled with issues.
-
-- It delivers events by forking and execing a userland binary
-  specified as the release_agent.  This is a long deprecated method of
-  notification delivery.  It's extremely heavy, slow and cumbersome to
-  integrate with larger infrastructure.
-
-- There is single monitoring point at the root.  There's no way to
-  delegate management of a subtree.
-
-- The event isn't recursive.  It triggers when a cgroup doesn't have
-  any tasks or child cgroups.  Events for internal nodes trigger only
-  after all children are removed.  This again makes it impossible to
-  delegate management of a subtree.
-
-- Events are filtered from the kernel side.  A "notify_on_release"
-  file is used to subscribe to or suppress release events.  This is
-  unnecessarily complicated and probably done this way because event
-  delivery itself was expensive.
-
-Unified hierarchy implements "populated" field in "cgroup.events"
-interface file which can be used to monitor whether the cgroup's
-subhierarchy has tasks in it or not.  Its value is 0 if there is no
-task in the cgroup and its descendants; otherwise, 1.  poll and
-[id]notify events are triggered when the value changes.
-
-This is significantly lighter and simpler and trivially allows
-delegating management of subhierarchy - subhierarchy monitoring can
-block further propagation simply by putting itself or another process
-in the subhierarchy and monitor events that it's interested in from
-there without interfering with monitoring higher in the tree.
-
-In unified hierarchy, the release_agent mechanism is no longer
-supported and the interface files "release_agent" and
-"notify_on_release" do not exist.
-
-
-5-2. Other Core Changes
-
-- None of the mount options is allowed.
-
-- remount is disallowed.
-
-- rename(2) is disallowed.
-
-- The "tasks" file is removed.  Everything should at process
-  granularity.  Use the "cgroup.procs" file instead.
-
-- The "cgroup.procs" file is not sorted.  pids will be unique unless
-  they got recycled in-between reads.
-
-- The "cgroup.clone_children" file is removed.
-
-- /proc/PID/cgroup keeps reporting the cgroup that a zombie belonged
-  to before exiting.  If the cgroup is removed before the zombie is
-  reaped, " (deleted)" is appeneded to the path.
-
-
-5-3. Controller File Conventions
-
-5-3-1. Format
-
-In general, all controller files should be in one of the following
-formats whenever possible.
-
-- Values only files
-
-  VAL0 VAL1...\n
-
-- Flat keyed files
-
-  KEY0 VAL0\n
-  KEY1 VAL1\n
-  ...
-
-- Nested keyed files
-
-  KEY0 SUB_KEY0=VAL00 SUB_KEY1=VAL01...
-  KEY1 SUB_KEY0=VAL10 SUB_KEY1=VAL11...
-  ...
-
-For a writeable file, the format for writing should generally match
-reading; however, controllers may allow omitting later fields or
-implement restricted shortcuts for most common use cases.
-
-For both flat and nested keyed files, only the values for a single key
-can be written at a time.  For nested keyed files, the sub key pairs
-may be specified in any order and not all pairs have to be specified.
-
-
-5-3-2. Control Knobs
-
-- Settings for a single feature should generally be implemented in a
-  single file.
-
-- In general, the root cgroup should be exempt from resource control
-  and thus shouldn't have resource control knobs.
-
-- If a controller implements ratio based resource distribution, the
-  control knob should be named "weight" and have the range [1, 10000]
-  and 100 should be the default value.  The values are chosen to allow
-  enough and symmetric bias in both directions while keeping it
-  intuitive (the default is 100%).
-
-- If a controller implements an absolute resource guarantee and/or
-  limit, the control knobs should be named "min" and "max"
-  respectively.  If a controller implements best effort resource
-  gurantee and/or limit, the control knobs should be named "low" and
-  "high" respectively.
-
-  In the above four control files, the special token "max" should be
-  used to represent upward infinity for both reading and writing.
-
-- If a setting has configurable default value and specific overrides,
-  the default settings should be keyed with "default" and appear as
-  the first entry in the file.  Specific entries can use "default" as
-  its value to indicate inheritance of the default value.
-
-- For events which are not very high frequency, an interface file
-  "events" should be created which lists event key value pairs.
-  Whenever a notifiable event happens, file modified event should be
-  generated on the file.
-
-
-5-4. Per-Controller Changes
-
-5-4-1. io
-
-- blkio is renamed to io.  The interface is overhauled anyway.  The
-  new name is more in line with the other two major controllers, cpu
-  and memory, and better suited given that it may be used for cgroup
-  writeback without involving block layer.
-
-- Everything including stat is always hierarchical making separate
-  recursive stat files pointless and, as no internal node can have
-  tasks, leaf weights are meaningless.  The operation model is
-  simplified and the interface is overhauled accordingly.
-
-  io.stat
-
-	The stat file.  The reported stats are from the point where
-	bio's are issued to request_queue.  The stats are counted
-	independent of which policies are enabled.  Each line in the
-	file follows the following format.  More fields may later be
-	added at the end.
-
-	  $MAJ:$MIN rbytes=$RBYTES wbytes=$WBYTES rios=$RIOS wrios=$WIOS
-
-  io.weight
-
-	The weight setting, currently only available and effective if
-	cfq-iosched is in use for the target device.  The weight is
-	between 1 and 10000 and defaults to 100.  The first line
-	always contains the default weight in the following format to
-	use when per-device setting is missing.
-
-	  default $WEIGHT
-
-	Subsequent lines list per-device weights of the following
-	format.
-
-	  $MAJ:$MIN $WEIGHT
-
-	Writing "$WEIGHT" or "default $WEIGHT" changes the default
-	setting.  Writing "$MAJ:$MIN $WEIGHT" sets per-device weight
-	while "$MAJ:$MIN default" clears it.
-
-	This file is available only on non-root cgroups.
-
-  io.max
-
-	The maximum bandwidth and/or iops setting, only available if
-	blk-throttle is enabled.  The file is of the following format.
-
-	  $MAJ:$MIN rbps=$RBPS wbps=$WBPS riops=$RIOPS wiops=$WIOPS
-
-	${R|W}BPS are read/write bytes per second and ${R|W}IOPS are
-	read/write IOs per second.  "max" indicates no limit.  Writing
-	to the file follows the same format but the individual
-	settings may be omitted or specified in any order.
-
-	This file is available only on non-root cgroups.
-
-
-5-4-2. cpuset
-
-- Tasks are kept in empty cpusets after hotplug and take on the masks
-  of the nearest non-empty ancestor, instead of being moved to it.
-
-- A task can be moved into an empty cpuset, and again it takes on the
-  masks of the nearest non-empty ancestor.
-
-
-5-4-3. memory
-
-- use_hierarchy is on by default and the cgroup file for the flag is
-  not created.
-
-- The original lower boundary, the soft limit, is defined as a limit
-  that is per default unset.  As a result, the set of cgroups that
-  global reclaim prefers is opt-in, rather than opt-out.  The costs
-  for optimizing these mostly negative lookups are so high that the
-  implementation, despite its enormous size, does not even provide the
-  basic desirable behavior.  First off, the soft limit has no
-  hierarchical meaning.  All configured groups are organized in a
-  global rbtree and treated like equal peers, regardless where they
-  are located in the hierarchy.  This makes subtree delegation
-  impossible.  Second, the soft limit reclaim pass is so aggressive
-  that it not just introduces high allocation latencies into the
-  system, but also impacts system performance due to overreclaim, to
-  the point where the feature becomes self-defeating.
-
-  The memory.low boundary on the other hand is a top-down allocated
-  reserve.  A cgroup enjoys reclaim protection when it and all its
-  ancestors are below their low boundaries, which makes delegation of
-  subtrees possible.  Secondly, new cgroups have no reserve per
-  default and in the common case most cgroups are eligible for the
-  preferred reclaim pass.  This allows the new low boundary to be
-  efficiently implemented with just a minor addition to the generic
-  reclaim code, without the need for out-of-band data structures and
-  reclaim passes.  Because the generic reclaim code considers all
-  cgroups except for the ones running low in the preferred first
-  reclaim pass, overreclaim of individual groups is eliminated as
-  well, resulting in much better overall workload performance.
-
-- The original high boundary, the hard limit, is defined as a strict
-  limit that can not budge, even if the OOM killer has to be called.
-  But this generally goes against the goal of making the most out of
-  the available memory.  The memory consumption of workloads varies
-  during runtime, and that requires users to overcommit.  But doing
-  that with a strict upper limit requires either a fairly accurate
-  prediction of the working set size or adding slack to the limit.
-  Since working set size estimation is hard and error prone, and
-  getting it wrong results in OOM kills, most users tend to err on the
-  side of a looser limit and end up wasting precious resources.
-
-  The memory.high boundary on the other hand can be set much more
-  conservatively.  When hit, it throttles allocations by forcing them
-  into direct reclaim to work off the excess, but it never invokes the
-  OOM killer.  As a result, a high boundary that is chosen too
-  aggressively will not terminate the processes, but instead it will
-  lead to gradual performance degradation.  The user can monitor this
-  and make corrections until the minimal memory footprint that still
-  gives acceptable performance is found.
-
-  In extreme cases, with many concurrent allocations and a complete
-  breakdown of reclaim progress within the group, the high boundary
-  can be exceeded.  But even then it's mostly better to satisfy the
-  allocation from the slack available in other groups or the rest of
-  the system than killing the group.  Otherwise, memory.max is there
-  to limit this type of spillover and ultimately contain buggy or even
-  malicious applications.
-
-- The original control file names are unwieldy and inconsistent in
-  many different ways.  For example, the upper boundary hit count is
-  exported in the memory.failcnt file, but an OOM event count has to
-  be manually counted by listening to memory.oom_control events, and
-  lower boundary / soft limit events have to be counted by first
-  setting a threshold for that value and then counting those events.
-  Also, usage and limit files encode their units in the filename.
-  That makes the filenames very long, even though this is not
-  information that a user needs to be reminded of every time they type
-  out those names.
-
-  To address these naming issues, as well as to signal clearly that
-  the new interface carries a new configuration model, the naming
-  conventions in it necessarily differ from the old interface.
-
-- The original limit files indicate the state of an unset limit with a
-  Very High Number, and a configured limit can be unset by echoing -1
-  into those files.  But that very high number is implementation and
-  architecture dependent and not very descriptive.  And while -1 can
-  be understood as an underflow into the highest possible value, -2 or
-  -10M etc. do not work, so it's not consistent.
-
-  memory.low, memory.high, and memory.max will use the string "max" to
-  indicate and set the highest possible value.
-
-6. Planned Changes
-
-6-1. CAP for resource control
-
-Unified hierarchy will require one of the capabilities(7), which is
-yet to be decided, for all resource control related knobs.  Process
-organization operations - creation of sub-cgroups and migration of
-processes in sub-hierarchies may be delegated by changing the
-ownership and/or permissions on the cgroup directory and
-"cgroup.procs" interface file; however, all operations which affect
-resource control - writes to a "cgroup.subtree_control" file or any
-controller-specific knobs - will require an explicit CAP privilege.
-
-This, in part, is to prevent the cgroup interface from being
-inadvertently promoted to programmable API used by non-privileged
-binaries.  cgroup exposes various aspects of the system in ways which
-aren't properly abstracted for direct consumption by regular programs.
-This is an administration interface much closer to sysctl knobs than
-system calls.  Even the basic access model, being filesystem path
-based, isn't suitable for direct consumption.  There's no way to
-access "my cgroup" in a race-free way or make multiple operations
-atomic against migration to another cgroup.
-
-Another aspect is that, for better or for worse, the cgroup interface
-goes through far less scrutiny than regular interfaces for
-unprivileged userland.  The upside is that cgroup is able to expose
-useful features which may not be suitable for general consumption in a
-reasonable time frame.  It provides a relatively short path between
-internal details and userland-visible interface.  Of course, this
-shortcut comes with high risk.  We go through what we go through for
-general kernel APIs for good reasons.  It may end up leaking internal
-details in a way which can exert significant pain by locking the
-kernel into a contract that can't be maintained in a reasonable
-manner.
-
-Also, due to the specific nature, cgroup and its controllers don't
-tend to attract attention from a wide scope of developers.  cgroup's
-short history is already fraught with severely mis-designed
-interfaces, unnecessary commitments to and exposing of internal
-details, broken and dangerous implementations of various features.
-
-Keeping cgroup as an administration interface is both advantageous for
-its role and imperative given its nature.  Some of the cgroup features
-may make sense for unprivileged access.  If deemed justified, those
-must be further abstracted and implemented as a different interface,
-be it a system call or process-private filesystem, and survive through
-the scrutiny that any interface for general consumption is required to
-go through.
-
-Requiring CAP is not a complete solution but should serve as a
-significant deterrent against spraying cgroup usages in non-privileged
-programs.

+ 199 - 42
Documentation/cpu-freq/intel-pstate.txt

@@ -1,61 +1,131 @@
-Intel P-state driver
+Intel P-State driver
 --------------------
 
 
-This driver provides an interface to control the P state selection for
-SandyBridge+ Intel processors.  The driver can operate two different
-modes based on the processor model, legacy mode and Hardware P state (HWP)
-mode.
-
-In legacy mode, the Intel P-state implements two internal governors,
-performance and powersave, that differ from the general cpufreq governors of
-the same name (the general cpufreq governors implement target(), whereas the
-internal Intel P-state governors implement setpolicy()).  The internal
-performance governor sets the max_perf_pct and min_perf_pct to 100; that is,
-the governor selects the highest available P state to maximize the performance
-of the core.  The internal powersave governor selects the appropriate P state
-based on the current load on the CPU.
-
-In HWP mode P state selection is implemented in the processor
-itself. The driver provides the interfaces between the cpufreq core and
-the processor to control P state selection based on user preferences
-and reporting frequency to the cpufreq core.  In this mode the
-internal Intel P-state governor code is disabled.
-
-In addition to the interfaces provided by the cpufreq core for
-controlling frequency the driver provides sysfs files for
-controlling P state selection. These files have been added to
-/sys/devices/system/cpu/intel_pstate/
-
-      max_perf_pct: limits the maximum P state that will be requested by
-      the driver stated as a percentage of the available performance. The
-      available (P states) performance may be reduced by the no_turbo
+This driver provides an interface to control the P-State selection for the
+SandyBridge+ Intel processors.
+
+The following document explains P-States:
+http://events.linuxfoundation.org/sites/events/files/slides/LinuxConEurope_2015.pdf
+As stated in the document, P-State doesn’t exactly mean a frequency. However, for
+the sake of the relationship with cpufreq, P-State and frequency are used
+interchangeably.
+
+Understanding the cpufreq core governors and policies is important before
+discussing more details about the Intel P-State driver. Based on which
+callbacks a driver provides to the cpufreq core, there are two types of
+cpufreq drivers:
+- with target_index() callback: In this mode, the drivers using cpufreq core
+simply provide the minimum and maximum frequency limits and an additional
+interface target_index() to set the current frequency. The cpufreq subsystem
+has a number of scaling governors ("performance", "powersave", "ondemand",
+etc.). Depending on which governor is in use, the cpufreq core will request
+transitions to a specific frequency using the target_index() callback.
+- setpolicy() callback: In this mode, drivers do not provide the target_index()
+callback, so the cpufreq core can't request a transition to a specific frequency.
+The driver provides minimum and maximum frequency limits and callbacks to set a
+policy. The policy in cpufreq sysfs is referred to as the "scaling governor".
+The cpufreq core can request the driver to operate in either of two policies:
+"performance" and "powersave". The driver decides which frequency to use based
+on the above policy selection considering minimum and maximum frequency limits.
+
+The Intel P-State driver falls under the latter category, which implements the
+setpolicy() callback. This driver decides what P-State to use based on the
+requested policy from the cpufreq core. If the processor is capable of
+selecting its next P-State internally, then the driver will offload this
+responsibility to the processor (aka HWP: Hardware P-States). If not, the
+driver implements algorithms to select the next P-State.
+
+Since these policies are implemented in the driver, they are not the same as the
+cpufreq scaling governors implementation, even if they have the same name in
+the cpufreq sysfs (scaling_governors). For example the "performance" policy is
+similar to cpufreq’s "performance" governor, but "powersave" is completely
+different than the cpufreq "powersave" governor. The strategy here is similar
+to cpufreq "ondemand", where the requested P-State is related to the system load.
+
+Sysfs Interface
+
+In addition to the frequency-controlling interfaces provided by the cpufreq
+core, the driver provides its own sysfs files to control the P-State selection.
+These files have been added to /sys/devices/system/cpu/intel_pstate/.
+Any changes made to these files are applicable to all CPUs (even in a
+multi-package system).
+
+      max_perf_pct: Limits the maximum P-State that will be requested by
+      the driver, expressed as a percentage of the available performance. The
+      available (P-State) performance may be reduced by the no_turbo
       setting described below.
 
 
-      min_perf_pct: limits the minimum P state that will be  requested by
-      the driver stated as a percentage of the max (non-turbo)
+      min_perf_pct: Limits the minimum P-State that will be requested by
+      the driver, expressed as a percentage of the max (non-turbo)
       performance level.
 
 
-      no_turbo: limits the driver to selecting P states below the turbo
+      no_turbo: Limits the driver to selecting P-States below the turbo
       frequency range.
 
 
-      turbo_pct: displays the percentage of the total performance that
-      is supported by hardware that is in the turbo range.  This number
+      turbo_pct: Displays the percentage of the total performance that
+      is supported by hardware that is in the turbo range. This number
       is independent of whether turbo has been disabled or not.
 
 
-      num_pstates: displays the number of pstates that are supported
-      by hardware.  This number is independent of whether turbo has
+      num_pstates: Displays the number of P-States that are supported
+      by hardware. This number is independent of whether turbo has
       been disabled or not.
 
 
+For example, if a system has these parameters:
+	Max 1 core turbo ratio: 0x21 (Max 1 core ratio is the maximum P-State)
+	Max non turbo ratio: 0x17
+	Minimum ratio : 0x08 (Here the ratio is called max efficiency ratio)
+
+Sysfs will show:
+	max_perf_pct:100, which corresponds to 1 core ratio
+	min_perf_pct:24, max_efficiency_ratio / max 1 Core ratio
+	no_turbo:0, turbo is not disabled
+	num_pstates:26 = (max 1 Core ratio - Max Efficiency Ratio + 1)
+	turbo_pct:39 = (max 1 core ratio - max non turbo ratio) / num_pstates
+
+Refer to "Intel® 64 and IA-32 Architectures Software Developer’s Manual
+Volume 3: System Programming Guide" to understand ratios.
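+
+As an illustration, the sysfs knobs above can be combined to bound the
+requested range on such a system (the 50% cap is an arbitrary value
+chosen for this sketch; min_perf_pct of 24 matches the ratios above):
+
+# echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo
+# echo 50 > /sys/devices/system/cpu/intel_pstate/max_perf_pct
+# echo 24 > /sys/devices/system/cpu/intel_pstate/min_perf_pct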
+
+cpufreq sysfs for Intel P-State
+
+Since this driver registers with cpufreq, cpufreq sysfs is also presented.
+There are some important differences, which need to be considered.
+
+scaling_cur_freq: This displays the real frequency which was used during
+the last sample period instead of what is requested. Some other cpufreq
+drivers, like acpi-cpufreq, display what is requested (some changes are on the
+way to fix this for the acpi-cpufreq driver). The same is true for frequencies
+displayed at /proc/cpuinfo.
+
+scaling_governor: This displays the current active policy. Since each CPU has a
+cpufreq sysfs, it is possible to set a scaling governor for each CPU. But this
+is not possible with the Intel P-State driver, as there is one common policy for all
+CPUs. Here, the last requested policy will be applicable to all CPUs. It is
+suggested that one use the cpupower utility to change policy to all CPUs at the
+same time.
+
+scaling_setspeed: This attribute can never be used with Intel P-State.
+
+scaling_max_freq/scaling_min_freq: This interface can be used similarly to
+the max_perf_pct/min_perf_pct of Intel P-State sysfs. However, since frequencies
+are converted to the nearest possible P-State, this is prone to rounding errors.
+This method is not preferred for limiting performance.
+
+affected_cpus: Not used
+related_cpus: Not used
+
 For contemporary Intel processors, the frequency is controlled by the
-processor itself and the P-states exposed to software are related to
+processor itself and the P-State exposed to software is related to
 performance levels.  The idea that frequency can be set to a single
-frequency is fiction for Intel Core processors. Even if the scaling
-driver selects a single P state the actual frequency the processor
+frequency is fictional for Intel Core processors. Even if the scaling
+driver selects a single P-State, the actual frequency the processor
 will run at is selected by the processor itself.
 
 
-For legacy mode debugfs files have also been added to allow tuning of
-the internal governor algorythm. These files are located at
-/sys/kernel/debug/pstate_snb/ These files are NOT present in HWP mode.
+Tuning Intel P-State driver
+
+When HWP mode is not used, debugfs files have also been added to allow the
+tuning of the internal governor algorithm. These files are located at
+/sys/kernel/debug/pstate_snb/. The algorithm uses a PID (Proportional
+Integral Derivative) controller. The PID tunable parameters are:
 
 
       deadband
       d_gain_pct
@@ -63,3 +133,90 @@ the internal governor algorythm. These files are located at
       p_gain_pct
       sample_rate_ms
       setpoint
+
+To adjust these parameters, some understanding of the driver implementation is
+necessary. Some tweaks are described here, but be very careful: adjusting them
+requires an expert-level understanding of the power and performance relationship.
+These limits are only useful when the "powersave" policy is active.
+
+- To make the system more responsive to load changes, sample_rate_ms can
+be adjusted (the current default is 10ms).
+- To make the system use higher performance, even if the load is lower, setpoint
+can be adjusted to a lower number. This will also lead to a faster ramp-up time
+to reach the maximum P-State.
+If there are no derivative and integral coefficients, the next P-State will be
+equal to:
+	current P-State - ((setpoint - current cpu load) * p_gain_pct)
+
+For example, if the current PID parameters are (which are the defaults for core
+processors like SandyBridge):
+      deadband = 0
+      d_gain_pct = 0
+      i_gain_pct = 0
+      p_gain_pct = 20
+      sample_rate_ms = 10
+      setpoint = 97
+
+If the current P-State = 0x08 and current load = 100, this will result in the
+next P-State = 0x08 - ((97 - 100) * 0.2) = 8.6 (rounded to 9). Here the P-State
+goes up by only 1. If during the next sample interval the current load doesn't
+change and is still 100, then the P-State goes up by one again. This process
+continues, as long as the load stays above the setpoint, until the maximum
+P-State is reached.
+
+For the same load at setpoint = 60, this will result in the next P-State
+= 0x08 - ((60 - 100) * 0.2) = 16.
+So by changing the setpoint from 97 to 60, the next P-State increases from 9 to
+16, making the processor execute at a higher P-State for the same CPU load. If
+the load continues to be more than the setpoint during the next sample
+intervals, then the P-State will go up again until the maximum P-State is
+reached. But the ramp-up time to reach the maximum P-State will be much faster
+when the setpoint is 60 compared to 97.
+
+Debugging Intel P-State driver
+
+Event tracing
+To debug P-State transitions, the Linux event tracing interface can be used.
+There are two specific events which can be enabled (provided the kernel
+configs related to event tracing are enabled).
+
+# cd /sys/kernel/debug/tracing/
+# echo 1 > events/power/pstate_sample/enable
+# echo 1 > events/power/cpu_frequency/enable
+# cat trace
+gnome-terminal--4510  [001] ..s.  1177.680733: pstate_sample: core_busy=107
+	scaled=94 from=26 to=26 mperf=1143818 aperf=1230607 tsc=29838618
+		freq=2474476
+cat-5235  [002] ..s.  1177.681723: cpu_frequency: state=2900000 cpu_id=2
+
+
+Using ftrace
+
+If function level tracing is required, the Linux ftrace interface can be used.
+For example, if we want to check how often the function that sets a P-State is
+called, we can set the ftrace filter to intel_pstate_set_pstate.
+
+# cd /sys/kernel/debug/tracing/
+# cat available_filter_functions | grep -i pstate
+intel_pstate_set_pstate
+intel_pstate_cpu_init
+...
+
+# echo intel_pstate_set_pstate > set_ftrace_filter
+# echo function > current_tracer
+# cat trace | head -15
+# tracer: function
+#
+# entries-in-buffer/entries-written: 80/80   #P:4
+#
+#                              _-----=> irqs-off
+#                             / _----=> need-resched
+#                            | / _---=> hardirq/softirq
+#                            || / _--=> preempt-depth
+#                            ||| /     delay
+#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
+#              | |       |   ||||       |         |
+            Xorg-3129  [000] ..s.  2537.644844: intel_pstate_set_pstate <-intel_pstate_timer_func
+ gnome-terminal--4510  [002] ..s.  2537.649844: intel_pstate_set_pstate <-intel_pstate_timer_func
+     gnome-shell-3409  [001] ..s.  2537.650850: intel_pstate_set_pstate <-intel_pstate_timer_func
+          <idle>-0     [000] ..s.  2537.654843: intel_pstate_set_pstate <-intel_pstate_timer_func

+ 2 - 2
Documentation/cpu-freq/pcc-cpufreq.txt

@@ -159,8 +159,8 @@ to be strictly associated with a P-state.
 
 
 2.2 cpuinfo_transition_latency:
 -------------------------------
-The cpuinfo_transition_latency field is 0. The PCC specification does
-not include a field to expose this value currently.
+The cpuinfo_transition_latency field is CPUFREQ_ETERNAL. The PCC specification
+does not include a field to expose this value currently.
 
 
 2.3 cpuinfo_cur_freq:
 ---------------------

+ 17 - 0
Documentation/devicetree/bindings/arm/cpus.txt

@@ -242,6 +242,23 @@ nodes to be present and contain the properties described below.
 		Definition: Specifies the syscon node controlling the cpu core
 			    power domains.
 
 
+	- dynamic-power-coefficient
+		Usage: optional
+		Value type: <prop-encoded-array>
+		Definition: A u32 value that represents the running time dynamic
+			    power coefficient in units of mW/MHz/uVolt^2. The
+			    coefficient can either be calculated from power
+			    measurements or derived by analysis.
+
+			    The dynamic power consumption of the CPU  is
+			    proportional to the square of the Voltage (V) and
+			    the clock frequency (f). The coefficient is used to
+			    calculate the dynamic power as below -
+
+			    Pdyn = dynamic-power-coefficient * V^2 * f
+
+			    where voltage is in uV, frequency is in MHz.
+
 Example 1 (dual-cluster big.LITTLE system 32-bit):
 
 
 	cpus {

+ 91 - 0
Documentation/devicetree/bindings/cpufreq/cpufreq-st.txt

@@ -0,0 +1,91 @@
+Binding for ST's CPUFreq driver
+===============================
+
+ST's CPUFreq driver attempts to read 'process' and 'version' attributes
+from the SoC, then supplies the OPP framework with 'prop' and 'supported
+hardware' information respectively.  The framework is then able to read
+the DT and operate in the usual way.
+
+For more information about the expected DT format [See: ../opp/opp.txt].
+
+Frequency Scaling only
+----------------------
+
+No vendor specific driver required for this.
+
+Located in CPU's node:
+
+- operating-points		: [See: ../power/opp.txt]
+
+Example [safe]
+--------------
+
+cpus {
+	cpu@0 {
+				 /* kHz     uV   */
+		operating-points = <1500000 0
+				    1200000 0
+				    800000  0
+				    500000  0>;
+	};
+};
+
+Dynamic Voltage and Frequency Scaling (DVFS)
+--------------------------------------------
+
+This requires the ST CPUFreq driver to supply 'process' and 'version' info.
+
+Located in CPU's node:
+
+- operating-points-v2		: [See ../power/opp.txt]
+
+Example [unsafe]
+----------------
+
+cpus {
+	cpu@0 {
+		operating-points-v2	= <&cpu0_opp_table>;
+	};
+};
+
+cpu0_opp_table: opp_table {
+	compatible = "operating-points-v2";
+
+	/* ############################################################### */
+	/* # WARNING: Do not attempt to copy/replicate these nodes,      # */
+	/* #          they are only to be supplied by the bootloader !!! # */
+	/* ############################################################### */
+	opp0 {
+		/*			   Major       Minor       Substrate */
+		/*			   2           all         all       */
+		opp-supported-hw	= <0x00000004  0xffffffff  0xffffffff>;
+		opp-hz			= /bits/ 64 <1500000000>;
+		clock-latency-ns	= <10000000>;
+
+		opp-microvolt-pcode0	= <1200000>;
+		opp-microvolt-pcode1	= <1200000>;
+		opp-microvolt-pcode2	= <1200000>;
+		opp-microvolt-pcode3	= <1200000>;
+		opp-microvolt-pcode4	= <1170000>;
+		opp-microvolt-pcode5	= <1140000>;
+		opp-microvolt-pcode6	= <1100000>;
+		opp-microvolt-pcode7	= <1070000>;
+	};
+
+	opp1 {
+		/*			   Major       Minor       Substrate */
+		/*			   all         all         all       */
+		opp-supported-hw	= <0xffffffff  0xffffffff  0xffffffff>;
+		opp-hz			= /bits/ 64 <1200000000>;
+		clock-latency-ns	= <10000000>;
+
+		opp-microvolt-pcode0	= <1110000>;
+		opp-microvolt-pcode1	= <1150000>;
+		opp-microvolt-pcode2	= <1100000>;
+		opp-microvolt-pcode3	= <1080000>;
+		opp-microvolt-pcode4	= <1040000>;
+		opp-microvolt-pcode5	= <1020000>;
+		opp-microvolt-pcode6	= <980000>;
+		opp-microvolt-pcode7	= <930000>;
+	};
+};

+ 93 - 39
Documentation/devicetree/bindings/opp/opp.txt

@@ -45,21 +45,10 @@ Devices supporting OPPs must set their "operating-points-v2" property with
 phandle to a OPP table in their DT node. The OPP core will use this phandle to
 find the operating points for the device.
 
 
-Devices may want to choose OPP tables at runtime and so can provide a list of
-phandles here. But only *one* of them should be chosen at runtime. This must be
-accompanied by a corresponding "operating-points-names" property, to uniquely
-identify the OPP tables.
-
 If required, this can be extended for SoC vendor specfic bindings. Such bindings
 should be documented as Documentation/devicetree/bindings/power/<vendor>-opp.txt
 and should have a compatible description like: "operating-points-v2-<vendor>".
 
 
-Optional properties:
-- operating-points-names: Names of OPP tables (required if multiple OPP
-  tables are present), to uniquely identify them. The same list must be present
-  for all the CPUs which are sharing clock/voltage rails and hence the OPP
-  tables.
-
 * OPP Table Node
 * OPP Table Node
 
 
 This describes the OPPs belonging to a device. This node can have following
 This describes the OPPs belonging to a device. This node can have following
@@ -100,6 +89,14 @@ Optional properties:
   Entries for multiple regulators must be present in the same order as
   Entries for multiple regulators must be present in the same order as
   regulators are specified in device's DT node.
   regulators are specified in device's DT node.
 
 
+- opp-microvolt-<name>: Named opp-microvolt property. This is exactly similar to
+  the above opp-microvolt property, but allows multiple voltage ranges to be
+  provided for the same OPP. At runtime, the platform can pick a <name> and
+  matching opp-microvolt-<name> property will be enabled for all OPPs. If the
+  platform doesn't pick a specific <name> or the <name> doesn't match with any
+  opp-microvolt-<name> properties, then opp-microvolt property shall be used, if
+  present.
+
 - opp-microamp: The maximum current drawn by the device in microamperes
 - opp-microamp: The maximum current drawn by the device in microamperes
   considering system specific parameters (such as transients, process, aging,
   considering system specific parameters (such as transients, process, aging,
   maximum operating temperature range etc.) as necessary. This may be used to
   maximum operating temperature range etc.) as necessary. This may be used to
@@ -112,6 +109,9 @@ Optional properties:
   for few regulators, then this should be marked as zero for them. If it isn't
   for few regulators, then this should be marked as zero for them. If it isn't
   required for any regulator, then this property need not be present.
   required for any regulator, then this property need not be present.
 
 
+- opp-microamp-<name>: Named opp-microamp property. Similar to
+  opp-microvolt-<name> property, but for microamp instead.
+
 - clock-latency-ns: Specifies the maximum possible transition latency (in
 - clock-latency-ns: Specifies the maximum possible transition latency (in
   nanoseconds) for switching to this OPP from any other OPP.
   nanoseconds) for switching to this OPP from any other OPP.
 
 
@@ -123,6 +123,26 @@ Optional properties:
 - opp-suspend: Marks the OPP to be used during device suspend. Only one OPP in
 - opp-suspend: Marks the OPP to be used during device suspend. Only one OPP in
   the table should have this.
   the table should have this.
 
 
+- opp-supported-hw: This enables us to select only a subset of OPPs from the
+  larger OPP table, based on what version of the hardware we are running on. We
+  still can't have multiple nodes with the same opp-hz value in OPP table.
+
+  It's an user defined array containing a hierarchy of hardware version numbers,
+  supported by the OPP. For example: a platform with hierarchy of three levels
+  of versions (A, B and C), this field should be like <X Y Z>, where X
+  corresponds to Version hierarchy A, Y corresponds to version hierarchy B and Z
+  corresponds to version hierarchy C.
+
+  Each level of hierarchy is represented by a 32 bit value, and so there can be
+  only 32 different supported version per hierarchy. i.e. 1 bit per version. A
+  value of 0xFFFFFFFF will enable the OPP for all versions for that hierarchy
+  level. And a value of 0x00000000 will disable the OPP completely, and so we
+  never want that to happen.
+
+  If 32 values aren't sufficient for a version hierarchy, than that version
+  hierarchy can be contained in multiple 32 bit values. i.e. <X Y Z1 Z2> in the
+  above example, Z1 & Z2 refer to the version hierarchy Z.
+
 - status: Marks the node enabled/disabled.
 - status: Marks the node enabled/disabled.
 
 
 Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
 Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
@@ -157,20 +177,20 @@ Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
 		compatible = "operating-points-v2";
 		compatible = "operating-points-v2";
 		opp-shared;
 		opp-shared;
 
 
-		opp00 {
+		opp@1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-microvolt = <970000 975000 985000>;
 			opp-microvolt = <970000 975000 985000>;
 			opp-microamp = <70000>;
 			opp-microamp = <70000>;
 			clock-latency-ns = <300000>;
 			clock-latency-ns = <300000>;
 			opp-suspend;
 			opp-suspend;
 		};
 		};
-		opp01 {
+		opp@1100000000 {
 			opp-hz = /bits/ 64 <1100000000>;
 			opp-hz = /bits/ 64 <1100000000>;
 			opp-microvolt = <980000 1000000 1010000>;
 			opp-microvolt = <980000 1000000 1010000>;
 			opp-microamp = <80000>;
 			opp-microamp = <80000>;
 			clock-latency-ns = <310000>;
 			clock-latency-ns = <310000>;
 		};
 		};
-		opp02 {
+		opp@1200000000 {
 			opp-hz = /bits/ 64 <1200000000>;
 			opp-hz = /bits/ 64 <1200000000>;
 			opp-microvolt = <1025000>;
 			opp-microvolt = <1025000>;
 			clock-latency-ns = <290000>;
 			clock-latency-ns = <290000>;
@@ -236,20 +256,20 @@ independently.
 		 * independently.
 		 * independently.
 		 */
 		 */
 
 
-		opp00 {
+		opp@1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-microvolt = <970000 975000 985000>;
 			opp-microvolt = <970000 975000 985000>;
 			opp-microamp = <70000>;
 			opp-microamp = <70000>;
 			clock-latency-ns = <300000>;
 			clock-latency-ns = <300000>;
 			opp-suspend;
 			opp-suspend;
 		};
 		};
-		opp01 {
+		opp@1100000000 {
 			opp-hz = /bits/ 64 <1100000000>;
 			opp-hz = /bits/ 64 <1100000000>;
 			opp-microvolt = <980000 1000000 1010000>;
 			opp-microvolt = <980000 1000000 1010000>;
 			opp-microamp = <80000>;
 			opp-microamp = <80000>;
 			clock-latency-ns = <310000>;
 			clock-latency-ns = <310000>;
 		};
 		};
-		opp02 {
+		opp@1200000000 {
 			opp-hz = /bits/ 64 <1200000000>;
 			opp-hz = /bits/ 64 <1200000000>;
 			opp-microvolt = <1025000>;
 			opp-microvolt = <1025000>;
 			opp-microamp = <90000;
 			opp-microamp = <90000;
@@ -312,20 +332,20 @@ DVFS state together.
 		compatible = "operating-points-v2";
 		compatible = "operating-points-v2";
 		opp-shared;
 		opp-shared;
 
 
-		opp00 {
+		opp@1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-microvolt = <970000 975000 985000>;
 			opp-microvolt = <970000 975000 985000>;
 			opp-microamp = <70000>;
 			opp-microamp = <70000>;
 			clock-latency-ns = <300000>;
 			clock-latency-ns = <300000>;
 			opp-suspend;
 			opp-suspend;
 		};
 		};
-		opp01 {
+		opp@1100000000 {
 			opp-hz = /bits/ 64 <1100000000>;
 			opp-hz = /bits/ 64 <1100000000>;
 			opp-microvolt = <980000 1000000 1010000>;
 			opp-microvolt = <980000 1000000 1010000>;
 			opp-microamp = <80000>;
 			opp-microamp = <80000>;
 			clock-latency-ns = <310000>;
 			clock-latency-ns = <310000>;
 		};
 		};
-		opp02 {
+		opp@1200000000 {
 			opp-hz = /bits/ 64 <1200000000>;
 			opp-hz = /bits/ 64 <1200000000>;
 			opp-microvolt = <1025000>;
 			opp-microvolt = <1025000>;
 			opp-microamp = <90000>;
 			opp-microamp = <90000>;
@@ -338,20 +358,20 @@ DVFS state together.
 		compatible = "operating-points-v2";
 		compatible = "operating-points-v2";
 		opp-shared;
 		opp-shared;
 
 
-		opp10 {
+		opp@1300000000 {
 			opp-hz = /bits/ 64 <1300000000>;
 			opp-hz = /bits/ 64 <1300000000>;
 			opp-microvolt = <1045000 1050000 1055000>;
 			opp-microvolt = <1045000 1050000 1055000>;
 			opp-microamp = <95000>;
 			opp-microamp = <95000>;
 			clock-latency-ns = <400000>;
 			clock-latency-ns = <400000>;
 			opp-suspend;
 			opp-suspend;
 		};
 		};
-		opp11 {
+		opp@1400000000 {
 			opp-hz = /bits/ 64 <1400000000>;
 			opp-hz = /bits/ 64 <1400000000>;
 			opp-microvolt = <1075000>;
 			opp-microvolt = <1075000>;
 			opp-microamp = <100000>;
 			opp-microamp = <100000>;
 			clock-latency-ns = <400000>;
 			clock-latency-ns = <400000>;
 		};
 		};
-		opp12 {
+		opp@1500000000 {
 			opp-hz = /bits/ 64 <1500000000>;
 			opp-hz = /bits/ 64 <1500000000>;
 			opp-microvolt = <1010000 1100000 1110000>;
 			opp-microvolt = <1010000 1100000 1110000>;
 			opp-microamp = <95000>;
 			opp-microamp = <95000>;
@@ -378,7 +398,7 @@ Example 4: Handling multiple regulators
 		compatible = "operating-points-v2";
 		compatible = "operating-points-v2";
 		opp-shared;
 		opp-shared;
 
 
-		opp00 {
+		opp@1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-microvolt = <970000>, /* Supply 0 */
 			opp-microvolt = <970000>, /* Supply 0 */
 					<960000>, /* Supply 1 */
 					<960000>, /* Supply 1 */
@@ -391,7 +411,7 @@ Example 4: Handling multiple regulators
 
 
 		/* OR */
 		/* OR */
 
 
-		opp00 {
+		opp@1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-microvolt = <970000 975000 985000>, /* Supply 0 */
 			opp-microvolt = <970000 975000 985000>, /* Supply 0 */
 					<960000 965000 975000>, /* Supply 1 */
 					<960000 965000 975000>, /* Supply 1 */
@@ -404,7 +424,7 @@ Example 4: Handling multiple regulators
 
 
 		/* OR */
 		/* OR */
 
 
-		opp00 {
+		opp@1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-microvolt = <970000 975000 985000>, /* Supply 0 */
 			opp-microvolt = <970000 975000 985000>, /* Supply 0 */
 					<960000 965000 975000>, /* Supply 1 */
 					<960000 965000 975000>, /* Supply 1 */
@@ -417,7 +437,8 @@ Example 4: Handling multiple regulators
 	};
 	};
 };
 };
 
 
-Example 5: Multiple OPP tables
+Example 5: opp-supported-hw
+(example: three level hierarchy of versions: cuts, substrate and process)
 
 
 / {
 / {
 	cpus {
 	cpus {
@@ -426,40 +447,73 @@ Example 5: Multiple OPP tables
 			...
 			...
 
 
 			cpu-supply = <&cpu_supply>
 			cpu-supply = <&cpu_supply>
-			operating-points-v2 = <&cpu0_opp_table_slow>, <&cpu0_opp_table_fast>;
-			operating-points-names = "slow", "fast";
+			operating-points-v2 = <&cpu0_opp_table_slow>;
 		};
 		};
 	};
 	};
 
 
-	cpu0_opp_table_slow: opp_table_slow {
+	opp_table {
 		compatible = "operating-points-v2";
 		compatible = "operating-points-v2";
 		status = "okay";
 		status = "okay";
 		opp-shared;
 		opp-shared;
 
 
-		opp00 {
+		opp@600000000 {
+			/*
+			 * Supports all substrate and process versions for 0xF
+			 * cuts, i.e. only first four cuts.
+			 */
+			opp-supported-hw = <0xF 0xFFFFFFFF 0xFFFFFFFF>
 			opp-hz = /bits/ 64 <600000000>;
 			opp-hz = /bits/ 64 <600000000>;
+			opp-microvolt = <900000 915000 925000>;
 			...
 			...
 		};
 		};
 
 
-		opp01 {
+		opp@800000000 {
+			/*
+			 * Supports:
+			 * - cuts: only one, 6th cut (represented by 6th bit).
+			 * - substrate: supports 16 different substrate versions
+			 * - process: supports 9 different process versions
+			 */
+			opp-supported-hw = <0x20 0xff0000ff 0x0000f4f0>
 			opp-hz = /bits/ 64 <800000000>;
 			opp-hz = /bits/ 64 <800000000>;
+			opp-microvolt = <900000 915000 925000>;
 			...
 			...
 		};
 		};
 	};
 	};
+};
+
+Example 6: opp-microvolt-<name>, opp-microamp-<name>:
+(example: device with two possible microvolt ranges: slow and fast)
 
 
-	cpu0_opp_table_fast: opp_table_fast {
+/ {
+	cpus {
+		cpu@0 {
+			compatible = "arm,cortex-a7";
+			...
+
+			operating-points-v2 = <&cpu0_opp_table>;
+		};
+	};
+
+	cpu0_opp_table: opp_table0 {
 		compatible = "operating-points-v2";
 		compatible = "operating-points-v2";
-		status = "okay";
 		opp-shared;
 		opp-shared;
 
 
-		opp10 {
+		opp@1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-hz = /bits/ 64 <1000000000>;
-			...
+			opp-microvolt-slow = <900000 915000 925000>;
+			opp-microvolt-fast = <970000 975000 985000>;
+			opp-microamp-slow =  <70000>;
+			opp-microamp-fast =  <71000>;
 		};
 		};
 
 
-		opp11 {
-			opp-hz = /bits/ 64 <1100000000>;
-			...
+		opp@1200000000 {
+			opp-hz = /bits/ 64 <1200000000>;
+			opp-microvolt-slow = <900000 915000 925000>, /* Supply vcc0 */
+					      <910000 925000 935000>; /* Supply vcc1 */
+			opp-microvolt-fast = <970000 975000 985000>, /* Supply vcc0 */
+					     <960000 965000 975000>; /* Supply vcc1 */
+			opp-microamp =  <70000>; /* Will be used for both slow/fast */
 		};
 		};
 	};
 	};
 };
 };

+ 17 - 0
Documentation/networking/mac80211-injection.txt

@@ -28,6 +28,23 @@ radiotap headers and used to control injection:
    IEEE80211_RADIOTAP_F_TX_NOACK: frame should be sent without waiting for
 				  an ACK even if it is a unicast frame
 
+ * IEEE80211_RADIOTAP_RATE
+
+   legacy rate for the transmission (only for devices without own rate control)
+
+ * IEEE80211_RADIOTAP_MCS
+
+   HT rate for the transmission (only for devices without own rate control).
+   Also some flags are parsed
+
+   IEEE80211_TX_RC_SHORT_GI: use short guard interval
+   IEEE80211_TX_RC_40_MHZ_WIDTH: send in HT40 mode
+
+ * IEEE80211_RADIOTAP_DATA_RETRIES
+
+   number of retries when either IEEE80211_RADIOTAP_RATE or
+   IEEE80211_RADIOTAP_MCS was used
+
 The injection code can also skip all other currently defined radiotap fields
 facilitating replay of captured radiotap headers directly.
 

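These fields let user space pin the rate when injecting frames through a
monitor interface. A minimal sketch of a radiotap header carrying a fixed
legacy rate is shown below; the struct name is made up for the example and
byte-order conversion is omitted, but the layout (8-byte radiotap header,
present bit 2 = Rate, one byte in 500 kbps units) follows the radiotap spec.

#include <stdint.h>

/* Sketch: radiotap header requesting a fixed 12 Mb/s legacy rate. */
struct radiotap_rate_hdr {
	uint8_t  it_version;	/* always 0 */
	uint8_t  it_pad;
	uint16_t it_len;	/* total radiotap length, little endian */
	uint32_t it_present;	/* present-field bitmap, little endian */
	uint8_t  rate;		/* IEEE80211_RADIOTAP_RATE, 500 kbps units */
} __attribute__((packed));

static const struct radiotap_rate_hdr inject_hdr = {
	.it_len     = 9,
	.it_present = 1 << 2,	/* bit 2 == Rate */
	.rate       = 24,	/* 24 * 500 kbps = 12 Mb/s */
};
/* Prepend inject_hdr to the 802.11 frame sent on the monitor interface. */
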
+ 1 - 1
Documentation/power/pci.txt

@@ -999,7 +999,7 @@ from its probe routine to make runtime PM work for the device.
 
 It is important to remember that the driver's runtime_suspend() callback
 may be executed right after the usage counter has been decremented, because
-user space may already have cuased the pm_runtime_allow() helper function
+user space may already have caused the pm_runtime_allow() helper function
 unblocking the runtime PM of the device to run via sysfs, so the driver must
 be prepared to cope with that.
 

+ 6 - 0
Documentation/power/runtime_pm.txt

@@ -371,6 +371,12 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
     - increment the device's usage counter, run pm_runtime_resume(dev) and
       return its result
 
+  int pm_runtime_get_if_in_use(struct device *dev);
+    - return -EINVAL if 'power.disable_depth' is nonzero; otherwise, if the
+      runtime PM status is RPM_ACTIVE and the runtime PM usage counter is
+      nonzero, increment the counter and return 1; otherwise return 0 without
+      changing the counter
+
   void pm_runtime_put_noidle(struct device *dev);
     - decrement the device's usage counter
 

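The new helper is meant for opportunistic hardware access: touch the device
only if it is already powered up, and never wake it. A minimal sketch, with
the register access left as a hypothetical driver function:

#include <linux/pm_runtime.h>

extern void mydrv_read_hw_counters(struct device *dev);	/* hypothetical */

static void mydrv_flush_counters(struct device *dev)
{
	/*
	 * 1: device was RPM_ACTIVE with a nonzero usage count (now +1),
	 * 0: device is suspended, -EINVAL: runtime PM is disabled.
	 */
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return;

	mydrv_read_hw_counters(dev);

	pm_runtime_put(dev);
}
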
+ 2 - 0
Documentation/rfkill.txt

@@ -83,6 +83,8 @@ rfkill drivers that control devices that can be hard-blocked unless they also
 assign the poll_hw_block() callback (then the rfkill core will poll the
 device). Don't do this unless you cannot get the event in any other way.
 
+RFKill provides per-switch LED triggers, which can be used to drive LEDs
+according to the switch state (LED_FULL when blocked, LED_OFF otherwise).
 
 
 5. Userspace support

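A driver that wants an LED to follow its switch only needs to point the LED
at the trigger registered by the rfkill core. A minimal sketch using the
existing LED-trigger helpers (the rfkill and LED class devices are assumed to
exist already; the trigger name is illustrative):

#include <linux/leds.h>
#include <linux/rfkill.h>

static void wlan_led_follow_rfkill(struct led_classdev *led,
				   struct rfkill *rfkill)
{
	/* Optional: pick a stable trigger name before rfkill_register(). */
	rfkill_set_led_trigger_name(rfkill, "mydev-wlan-radio");

	led->default_trigger = rfkill_get_led_trigger_name(rfkill);
}
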
+ 11 - 0
MAINTAINERS

@@ -8466,6 +8466,17 @@ F:	fs/timerfd.c
 F:	include/linux/timer*
 F:	kernel/time/*timer*
 
+POWER MANAGEMENT CORE
+M:	"Rafael J. Wysocki" <rjw@rjwysocki.net>
+L:	linux-pm@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
+S:	Supported
+F:	drivers/base/power/
+F:	include/linux/pm.h
+F:	include/linux/pm_*
+F:	include/linux/powercap.h
+F:	drivers/powercap/
+
 POWER SUPPLY CLASS/SUBSYSTEM and DRIVERS
 M:	Sebastian Reichel <sre@kernel.org>
 M:	Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>

+ 14 - 14
arch/arm/boot/dts/exynos4412.dtsi

@@ -64,73 +64,73 @@
 		compatible = "operating-points-v2";
 		compatible = "operating-points-v2";
 		opp-shared;
 		opp-shared;
 
 
-		opp00 {
+		opp@200000000 {
 			opp-hz = /bits/ 64 <200000000>;
 			opp-hz = /bits/ 64 <200000000>;
 			opp-microvolt = <900000>;
 			opp-microvolt = <900000>;
 			clock-latency-ns = <200000>;
 			clock-latency-ns = <200000>;
 		};
 		};
-		opp01 {
+		opp@300000000 {
 			opp-hz = /bits/ 64 <300000000>;
 			opp-hz = /bits/ 64 <300000000>;
 			opp-microvolt = <900000>;
 			opp-microvolt = <900000>;
 			clock-latency-ns = <200000>;
 			clock-latency-ns = <200000>;
 		};
 		};
-		opp02 {
+		opp@400000000 {
 			opp-hz = /bits/ 64 <400000000>;
 			opp-hz = /bits/ 64 <400000000>;
 			opp-microvolt = <925000>;
 			opp-microvolt = <925000>;
 			clock-latency-ns = <200000>;
 			clock-latency-ns = <200000>;
 		};
 		};
-		opp03 {
+		opp@500000000 {
 			opp-hz = /bits/ 64 <500000000>;
 			opp-hz = /bits/ 64 <500000000>;
 			opp-microvolt = <950000>;
 			opp-microvolt = <950000>;
 			clock-latency-ns = <200000>;
 			clock-latency-ns = <200000>;
 		};
 		};
-		opp04 {
+		opp@600000000 {
 			opp-hz = /bits/ 64 <600000000>;
 			opp-hz = /bits/ 64 <600000000>;
 			opp-microvolt = <975000>;
 			opp-microvolt = <975000>;
 			clock-latency-ns = <200000>;
 			clock-latency-ns = <200000>;
 		};
 		};
-		opp05 {
+		opp@700000000 {
 			opp-hz = /bits/ 64 <700000000>;
 			opp-hz = /bits/ 64 <700000000>;
 			opp-microvolt = <987500>;
 			opp-microvolt = <987500>;
 			clock-latency-ns = <200000>;
 			clock-latency-ns = <200000>;
 		};
 		};
-		opp06 {
+		opp@800000000 {
 			opp-hz = /bits/ 64 <800000000>;
 			opp-hz = /bits/ 64 <800000000>;
 			opp-microvolt = <1000000>;
 			opp-microvolt = <1000000>;
 			clock-latency-ns = <200000>;
 			clock-latency-ns = <200000>;
 			opp-suspend;
 			opp-suspend;
 		};
 		};
-		opp07 {
+		opp@900000000 {
 			opp-hz = /bits/ 64 <900000000>;
 			opp-hz = /bits/ 64 <900000000>;
 			opp-microvolt = <1037500>;
 			opp-microvolt = <1037500>;
 			clock-latency-ns = <200000>;
 			clock-latency-ns = <200000>;
 		};
 		};
-		opp08 {
+		opp@1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-microvolt = <1087500>;
 			opp-microvolt = <1087500>;
 			clock-latency-ns = <200000>;
 			clock-latency-ns = <200000>;
 		};
 		};
-		opp09 {
+		opp@1100000000 {
 			opp-hz = /bits/ 64 <1100000000>;
 			opp-hz = /bits/ 64 <1100000000>;
 			opp-microvolt = <1137500>;
 			opp-microvolt = <1137500>;
 			clock-latency-ns = <200000>;
 			clock-latency-ns = <200000>;
 		};
 		};
-		opp10 {
+		opp@1200000000 {
 			opp-hz = /bits/ 64 <1200000000>;
 			opp-hz = /bits/ 64 <1200000000>;
 			opp-microvolt = <1187500>;
 			opp-microvolt = <1187500>;
 			clock-latency-ns = <200000>;
 			clock-latency-ns = <200000>;
 		};
 		};
-		opp11 {
+		opp@1300000000 {
 			opp-hz = /bits/ 64 <1300000000>;
 			opp-hz = /bits/ 64 <1300000000>;
 			opp-microvolt = <1250000>;
 			opp-microvolt = <1250000>;
 			clock-latency-ns = <200000>;
 			clock-latency-ns = <200000>;
 		};
 		};
-		opp12 {
+		opp@1400000000 {
 			opp-hz = /bits/ 64 <1400000000>;
 			opp-hz = /bits/ 64 <1400000000>;
 			opp-microvolt = <1287500>;
 			opp-microvolt = <1287500>;
 			clock-latency-ns = <200000>;
 			clock-latency-ns = <200000>;
 		};
 		};
-		opp13 {
+		opp@1500000000 {
 			opp-hz = /bits/ 64 <1500000000>;
 			opp-hz = /bits/ 64 <1500000000>;
 			opp-microvolt = <1350000>;
 			opp-microvolt = <1350000>;
 			clock-latency-ns = <200000>;
 			clock-latency-ns = <200000>;

+ 10 - 7
arch/arm/mach-tegra/board-paz00.c

@@ -17,23 +17,25 @@
  *
  *
  */
  */
 
 
+#include <linux/property.h>
 #include <linux/gpio/machine.h>
 #include <linux/gpio/machine.h>
 #include <linux/platform_device.h>
 #include <linux/platform_device.h>
-#include <linux/rfkill-gpio.h>
 
 
 #include "board.h"
 #include "board.h"
 
 
-static struct rfkill_gpio_platform_data wifi_rfkill_platform_data = {
-	.name	= "wifi_rfkill",
-	.type	= RFKILL_TYPE_WLAN,
+static struct property_entry __initdata wifi_rfkill_prop[] = {
+	PROPERTY_ENTRY_STRING("name", "wifi_rfkill"),
+	PROPERTY_ENTRY_STRING("type", "wlan"),
+	{ },
+};
+
+static struct property_set __initdata wifi_rfkill_pset = {
+	.properties = wifi_rfkill_prop,
 };
 };
 
 
 static struct platform_device wifi_rfkill_device = {
 static struct platform_device wifi_rfkill_device = {
 	.name	= "rfkill_gpio",
 	.name	= "rfkill_gpio",
 	.id	= -1,
 	.id	= -1,
-	.dev	= {
-		.platform_data = &wifi_rfkill_platform_data,
-	},
 };
 };
 
 
 static struct gpiod_lookup_table wifi_gpio_lookup = {
 static struct gpiod_lookup_table wifi_gpio_lookup = {
@@ -47,6 +49,7 @@ static struct gpiod_lookup_table wifi_gpio_lookup = {
 
 
 void __init tegra_paz00_wifikill_init(void)
 void __init tegra_paz00_wifikill_init(void)
 {
 {
+	platform_device_add_properties(&wifi_rfkill_device, &wifi_rfkill_pset);
 	gpiod_add_lookup_table(&wifi_gpio_lookup);
 	gpiod_add_lookup_table(&wifi_gpio_lookup);
 	platform_device_register(&wifi_rfkill_device);
 	platform_device_register(&wifi_rfkill_device);
 }
 }

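On the consumer side the rfkill-gpio driver can now fetch the same strings
through the unified device-properties API instead of platform data; a minimal
sketch of the probe-side lookup (error handling trimmed, helper name
hypothetical):

#include <linux/property.h>

static int rfkill_gpio_get_props(struct device *dev,
				 const char **name, const char **type)
{
	int ret;

	ret = device_property_read_string(dev, "name", name);
	if (ret)
		return ret;

	return device_property_read_string(dev, "type", type);
}
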
+ 5 - 7
arch/ia64/kernel/ftrace.c

@@ -97,13 +97,11 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 	unsigned char replaced[MCOUNT_INSN_SIZE];
 
 	/*
-	 * Note: Due to modules and __init, code can
-	 *  disappear and change, we need to protect against faulting
-	 *  as well as code changing. We do this by using the
-	 *  probe_kernel_* functions.
-	 *
-	 * No real locking needed, this code is run through
-	 * kstop_machine, or before SMP starts.
+	 * Note:
+	 * We are paranoid about modifying text, as if a bug was to happen, it
+	 * could cause us to read or write to someplace that could cause harm.
+	 * Carefully read and modify the code with probe_kernel_*(), and make
+	 * sure what we read is what we expected it to be before modifying it.
 	 */
 
 	if (!do_check)

+ 5 - 6
arch/metag/kernel/ftrace.c

@@ -54,12 +54,11 @@ static int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
 	unsigned char replaced[MCOUNT_INSN_SIZE];
 
 	/*
-	 * Note: Due to modules and __init, code can
-	 *  disappear and change, we need to protect against faulting
-	 *  as well as code changing.
-	 *
-	 * No real locking needed, this code is run through
-	 * kstop_machine.
+	 * Note:
+	 * We are paranoid about modifying text, as if a bug was to happen, it
+	 * could cause us to read or write to someplace that could cause harm.
+	 * Carefully read and modify the code with probe_kernel_*(), and make
+	 * sure what we read is what we expected it to be before modifying it.
 	 */
 
 	/* read the text we want to modify */

+ 5 - 7
arch/sh/kernel/ftrace.c

@@ -212,13 +212,11 @@ static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 	unsigned char replaced[MCOUNT_INSN_SIZE];
 
 	/*
-	 * Note: Due to modules and __init, code can
-	 *  disappear and change, we need to protect against faulting
-	 *  as well as code changing. We do this by using the
-	 *  probe_kernel_* functions.
-	 *
-	 * No real locking needed, this code is run through
-	 * kstop_machine, or before SMP starts.
+	 * Note:
+	 * We are paranoid about modifying text, as if a bug was to happen, it
+	 * could cause us to read or write to someplace that could cause harm.
+	 * Carefully read and modify the code with probe_kernel_*(), and make
+	 * sure what we read is what we expected it to be before modifying it.
 	 */
 
 	/* read the text we want to modify */

+ 2 - 1
arch/x86/Kconfig

@@ -534,9 +534,10 @@ config X86_INTEL_QUARK
 
 config X86_INTEL_LPSS
 	bool "Intel Low Power Subsystem Support"
-	depends on ACPI
+	depends on X86 && ACPI
 	select COMMON_CLK
 	select PINCTRL
+	select IOSF_MBI
 	---help---
 	  Select to build support for Intel Low Power Subsystem such as
 	  found on Intel Lynxpoint PCH. Selecting this option enables

+ 13 - 38
arch/x86/include/asm/iosf_mbi.h

@@ -1,5 +1,5 @@
 /*
 /*
- * iosf_mbi.h: Intel OnChip System Fabric MailBox access support
+ * Intel OnChip System Fabric MailBox access support
  */
  */
 
 
 #ifndef IOSF_MBI_SYMS_H
 #ifndef IOSF_MBI_SYMS_H
@@ -16,6 +16,18 @@
 #define MBI_MASK_LO		0x000000FF
 #define MBI_MASK_LO		0x000000FF
 #define MBI_ENABLE		0xF0
 #define MBI_ENABLE		0xF0
 
 
+/* IOSF SB read/write opcodes */
+#define MBI_MMIO_READ		0x00
+#define MBI_MMIO_WRITE		0x01
+#define MBI_CFG_READ		0x04
+#define MBI_CFG_WRITE		0x05
+#define MBI_CR_READ		0x06
+#define MBI_CR_WRITE		0x07
+#define MBI_REG_READ		0x10
+#define MBI_REG_WRITE		0x11
+#define MBI_ESRAM_READ		0x12
+#define MBI_ESRAM_WRITE		0x13
+
 /* Baytrail available units */
 /* Baytrail available units */
 #define BT_MBI_UNIT_AUNIT	0x00
 #define BT_MBI_UNIT_AUNIT	0x00
 #define BT_MBI_UNIT_SMC		0x01
 #define BT_MBI_UNIT_SMC		0x01
@@ -28,50 +40,13 @@
 #define BT_MBI_UNIT_SATA	0xA3
 #define BT_MBI_UNIT_SATA	0xA3
 #define BT_MBI_UNIT_PCIE	0xA6
 #define BT_MBI_UNIT_PCIE	0xA6
 
 
-/* Baytrail read/write opcodes */
-#define BT_MBI_AUNIT_READ	0x10
-#define BT_MBI_AUNIT_WRITE	0x11
-#define BT_MBI_SMC_READ		0x10
-#define BT_MBI_SMC_WRITE	0x11
-#define BT_MBI_CPU_READ		0x10
-#define BT_MBI_CPU_WRITE	0x11
-#define BT_MBI_BUNIT_READ	0x10
-#define BT_MBI_BUNIT_WRITE	0x11
-#define BT_MBI_PMC_READ		0x06
-#define BT_MBI_PMC_WRITE	0x07
-#define BT_MBI_GFX_READ		0x00
-#define BT_MBI_GFX_WRITE	0x01
-#define BT_MBI_SMIO_READ	0x06
-#define BT_MBI_SMIO_WRITE	0x07
-#define BT_MBI_USB_READ		0x06
-#define BT_MBI_USB_WRITE	0x07
-#define BT_MBI_SATA_READ	0x00
-#define BT_MBI_SATA_WRITE	0x01
-#define BT_MBI_PCIE_READ	0x00
-#define BT_MBI_PCIE_WRITE	0x01
-
 /* Quark available units */
 /* Quark available units */
 #define QRK_MBI_UNIT_HBA	0x00
 #define QRK_MBI_UNIT_HBA	0x00
 #define QRK_MBI_UNIT_HB		0x03
 #define QRK_MBI_UNIT_HB		0x03
 #define QRK_MBI_UNIT_RMU	0x04
 #define QRK_MBI_UNIT_RMU	0x04
 #define QRK_MBI_UNIT_MM		0x05
 #define QRK_MBI_UNIT_MM		0x05
-#define QRK_MBI_UNIT_MMESRAM	0x05
 #define QRK_MBI_UNIT_SOC	0x31
 #define QRK_MBI_UNIT_SOC	0x31
 
 
-/* Quark read/write opcodes */
-#define QRK_MBI_HBA_READ	0x10
-#define QRK_MBI_HBA_WRITE	0x11
-#define QRK_MBI_HB_READ		0x10
-#define QRK_MBI_HB_WRITE	0x11
-#define QRK_MBI_RMU_READ	0x10
-#define QRK_MBI_RMU_WRITE	0x11
-#define QRK_MBI_MM_READ		0x10
-#define QRK_MBI_MM_WRITE	0x11
-#define QRK_MBI_MMESRAM_READ	0x12
-#define QRK_MBI_MMESRAM_WRITE	0x13
-#define QRK_MBI_SOC_READ	0x06
-#define QRK_MBI_SOC_WRITE	0x07
-
 #if IS_ENABLED(CONFIG_IOSF_MBI)
 #if IS_ENABLED(CONFIG_IOSF_MBI)
 
 
 bool iosf_mbi_available(void);
 bool iosf_mbi_available(void);

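With the per-unit opcodes folded into the shared MBI_* set, callers pass a
unit id plus a generic opcode. A minimal sketch of a sideband register read
(the offset is illustrative):

#include <asm/iosf_mbi.h>

static int punit_sb_read(u32 offset, u32 *val)
{
	if (!iosf_mbi_available())
		return -ENODEV;

	/* Unit + generic opcode replace the old BT_MBI_PMC_READ-style macros. */
	return iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, offset, val);
}
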
+ 14 - 7
arch/x86/kernel/ftrace.c

@@ -105,14 +105,14 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
 {
 {
 	unsigned char replaced[MCOUNT_INSN_SIZE];
 	unsigned char replaced[MCOUNT_INSN_SIZE];
 
 
+	ftrace_expected = old_code;
+
 	/*
 	/*
-	 * Note: Due to modules and __init, code can
-	 *  disappear and change, we need to protect against faulting
-	 *  as well as code changing. We do this by using the
-	 *  probe_kernel_* functions.
-	 *
-	 * No real locking needed, this code is run through
-	 * kstop_machine, or before SMP starts.
+	 * Note:
+	 * We are paranoid about modifying text, as if a bug was to happen, it
+	 * could cause us to read or write to someplace that could cause harm.
+	 * Carefully read and modify the code with probe_kernel_*(), and make
+	 * sure what we read is what we expected it to be before modifying it.
 	 */
 	 */
 
 
 	/* read the text we want to modify */
 	/* read the text we want to modify */
@@ -154,6 +154,8 @@ int ftrace_make_nop(struct module *mod,
 	if (addr == MCOUNT_ADDR)
 	if (addr == MCOUNT_ADDR)
 		return ftrace_modify_code_direct(rec->ip, old, new);
 		return ftrace_modify_code_direct(rec->ip, old, new);
 
 
+	ftrace_expected = NULL;
+
 	/* Normal cases use add_brk_on_nop */
 	/* Normal cases use add_brk_on_nop */
 	WARN_ONCE(1, "invalid use of ftrace_make_nop");
 	WARN_ONCE(1, "invalid use of ftrace_make_nop");
 	return -EINVAL;
 	return -EINVAL;
@@ -220,6 +222,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 				 unsigned long addr)
 				 unsigned long addr)
 {
 {
 	WARN_ON(1);
 	WARN_ON(1);
+	ftrace_expected = NULL;
 	return -EINVAL;
 	return -EINVAL;
 }
 }
 
 
@@ -314,6 +317,8 @@ static int add_break(unsigned long ip, const char *old)
 	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
 	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
 		return -EFAULT;
 		return -EFAULT;
 
 
+	ftrace_expected = old;
+
 	/* Make sure it is what we expect it to be */
 	/* Make sure it is what we expect it to be */
 	if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
 	if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
 		return -EINVAL;
 		return -EINVAL;
@@ -413,6 +418,8 @@ static int remove_breakpoint(struct dyn_ftrace *rec)
 		ftrace_addr = ftrace_get_addr_curr(rec);
 		ftrace_addr = ftrace_get_addr_curr(rec);
 		nop = ftrace_call_replace(ip, ftrace_addr);
 		nop = ftrace_call_replace(ip, ftrace_addr);
 
 
+		ftrace_expected = nop;
+
 		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
 		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
 			return -EINVAL;
 			return -EINVAL;
 	}
 	}

+ 2 - 5
arch/x86/platform/atom/punit_atom_debug.c

@@ -25,8 +25,6 @@
 #include <asm/cpu_device_id.h>
 #include <asm/cpu_device_id.h>
 #include <asm/iosf_mbi.h>
 #include <asm/iosf_mbi.h>
 
 
-/* Side band Interface port */
-#define PUNIT_PORT		0x04
 /* Power gate status reg */
 /* Power gate status reg */
 #define PWRGT_STATUS		0x61
 #define PWRGT_STATUS		0x61
 /* Subsystem config/status Video processor */
 /* Subsystem config/status Video processor */
@@ -85,9 +83,8 @@ static int punit_dev_state_show(struct seq_file *seq_file, void *unused)
 
 
 	seq_puts(seq_file, "\n\nPUNIT NORTH COMPLEX DEVICES :\n");
 	seq_puts(seq_file, "\n\nPUNIT NORTH COMPLEX DEVICES :\n");
 	while (punit_devp->name) {
 	while (punit_devp->name) {
-		status = iosf_mbi_read(PUNIT_PORT, BT_MBI_PMC_READ,
-				       punit_devp->reg,
-				       &punit_pwr_status);
+		status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
+				       punit_devp->reg, &punit_pwr_status);
 		if (status) {
 		if (status) {
 			seq_printf(seq_file, "%9s : Read Failed\n",
 			seq_printf(seq_file, "%9s : Read Failed\n",
 				   punit_devp->name);
 				   punit_devp->name);

+ 10 - 18
arch/x86/platform/intel-quark/imr.c

@@ -111,23 +111,19 @@ static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
 	u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
 	u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
 	int ret;
 	int ret;
 
 
-	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
-				reg++, &imr->addr_lo);
+	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_lo);
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
 
 
-	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
-				reg++, &imr->addr_hi);
+	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_hi);
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
 
 
-	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
-				reg++, &imr->rmask);
+	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->rmask);
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
 
 
-	return iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
-				reg++, &imr->wmask);
+	return iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->wmask);
 }
 }
 
 
 /**
 /**
@@ -151,31 +147,27 @@ static int imr_write(struct imr_device *idev, u32 imr_id,
 
 
 	local_irq_save(flags);
 	local_irq_save(flags);
 
 
-	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, reg++,
-				imr->addr_lo);
+	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_lo);
 	if (ret)
 	if (ret)
 		goto failed;
 		goto failed;
 
 
-	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
-				reg++, imr->addr_hi);
+	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_hi);
 	if (ret)
 	if (ret)
 		goto failed;
 		goto failed;
 
 
-	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
-				reg++, imr->rmask);
+	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->rmask);
 	if (ret)
 	if (ret)
 		goto failed;
 		goto failed;
 
 
-	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
-				reg++, imr->wmask);
+	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->wmask);
 	if (ret)
 	if (ret)
 		goto failed;
 		goto failed;
 
 
 	/* Lock bit must be set separately to addr_lo address bits. */
 	/* Lock bit must be set separately to addr_lo address bits. */
 	if (lock) {
 	if (lock) {
 		imr->addr_lo |= IMR_LOCK;
 		imr->addr_lo |= IMR_LOCK;
-		ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
-					reg - IMR_NUM_REGS, imr->addr_lo);
+		ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE,
+				     reg - IMR_NUM_REGS, imr->addr_lo);
 		if (ret)
 		if (ret)
 			goto failed;
 			goto failed;
 	}
 	}

+ 14 - 3
drivers/acpi/Kconfig

@@ -58,14 +58,25 @@ config ACPI_CCA_REQUIRED
 	bool
 
 config ACPI_DEBUGGER
-	bool "AML debugger interface (EXPERIMENTAL)"
+	bool "AML debugger interface"
 	select ACPI_DEBUG
 	help
-	  Enable in-kernel debugging of AML facilities: statistics, internal
-	  object dump, single step control method execution.
+	  Enable in-kernel debugging of AML facilities: statistics,
+	  internal object dump, single step control method execution.
 	  This is still under development, currently enabling this only
 	  results in the compilation of the ACPICA debugger files.
 
+if ACPI_DEBUGGER
+
+config ACPI_DEBUGGER_USER
+	tristate "Userspace debugger accessiblity"
+	depends on DEBUG_FS
+	help
+	  Export /sys/kernel/debug/acpi/acpidbg for userspace utilities
+	  to access the debugger functionalities.
+
+endif
+
 config ACPI_SLEEP
 	bool
 	depends on SUSPEND || HIBERNATION

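With ACPI_DEBUGGER_USER enabled, the AML debugger is driven entirely through
the /sys/kernel/debug/acpi/acpidbg file implemented by the new acpi_dbg.c
below. A minimal user-space sketch; a real client would poll() the file and
interleave reads and writes rather than issuing a single read:

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/acpi/acpidbg", O_RDWR);

	if (fd < 0)
		return 1;

	write(fd, "help\n", 5);			/* consumed by acpi_aml_read_cmd() */

	n = read(fd, buf, sizeof(buf));		/* log captured by acpi_aml_write_log() */
	if (n > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}
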
+ 5 - 4
drivers/acpi/Makefile

@@ -8,13 +8,13 @@ ccflags-$(CONFIG_ACPI_DEBUG)	+= -DACPI_DEBUG_OUTPUT
 #
 #
 # ACPI Boot-Time Table Parsing
 # ACPI Boot-Time Table Parsing
 #
 #
-obj-y				+= tables.o
+obj-$(CONFIG_ACPI)		+= tables.o
 obj-$(CONFIG_X86)		+= blacklist.o
 obj-$(CONFIG_X86)		+= blacklist.o
 
 
 #
 #
 # ACPI Core Subsystem (Interpreter)
 # ACPI Core Subsystem (Interpreter)
 #
 #
-obj-y				+= acpi.o \
+obj-$(CONFIG_ACPI)		+= acpi.o \
 					acpica/
 					acpica/
 
 
 # All the builtin files are in the "acpi." module_param namespace.
 # All the builtin files are in the "acpi." module_param namespace.
@@ -66,10 +66,10 @@ obj-$(CONFIG_ACPI_FAN)		+= fan.o
 obj-$(CONFIG_ACPI_VIDEO)	+= video.o
 obj-$(CONFIG_ACPI_VIDEO)	+= video.o
 obj-$(CONFIG_ACPI_PCI_SLOT)	+= pci_slot.o
 obj-$(CONFIG_ACPI_PCI_SLOT)	+= pci_slot.o
 obj-$(CONFIG_ACPI_PROCESSOR)	+= processor.o
 obj-$(CONFIG_ACPI_PROCESSOR)	+= processor.o
-obj-y				+= container.o
+obj-$(CONFIG_ACPI)		+= container.o
 obj-$(CONFIG_ACPI_THERMAL)	+= thermal.o
 obj-$(CONFIG_ACPI_THERMAL)	+= thermal.o
 obj-$(CONFIG_ACPI_NFIT)		+= nfit.o
 obj-$(CONFIG_ACPI_NFIT)		+= nfit.o
-obj-y				+= acpi_memhotplug.o
+obj-$(CONFIG_ACPI)		+= acpi_memhotplug.o
 obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o
 obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o
 obj-$(CONFIG_ACPI_BATTERY)	+= battery.o
 obj-$(CONFIG_ACPI_BATTERY)	+= battery.o
 obj-$(CONFIG_ACPI_SBS)		+= sbshc.o
 obj-$(CONFIG_ACPI_SBS)		+= sbshc.o
@@ -79,6 +79,7 @@ obj-$(CONFIG_ACPI_EC_DEBUGFS)	+= ec_sys.o
 obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
 obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
 obj-$(CONFIG_ACPI_BGRT)		+= bgrt.o
 obj-$(CONFIG_ACPI_BGRT)		+= bgrt.o
 obj-$(CONFIG_ACPI_CPPC_LIB)	+= cppc_acpi.o
 obj-$(CONFIG_ACPI_CPPC_LIB)	+= cppc_acpi.o
+obj-$(CONFIG_ACPI_DEBUGGER_USER) += acpi_dbg.o
 
 
 # processor has its own "processor." module_param namespace
 # processor has its own "processor." module_param namespace
 processor-y			:= processor_driver.o
 processor-y			:= processor_driver.o

+ 15 - 1
drivers/acpi/acpi_apd.c

@@ -51,7 +51,7 @@ struct apd_private_data {
 	const struct apd_device_desc *dev_desc;
 	const struct apd_device_desc *dev_desc;
 };
 };
 
 
-#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
+#if defined(CONFIG_X86_AMD_PLATFORM_DEVICE) || defined(CONFIG_ARM64)
 #define APD_ADDR(desc)	((unsigned long)&desc)
 #define APD_ADDR(desc)	((unsigned long)&desc)
 
 
 static int acpi_apd_setup(struct apd_private_data *pdata)
 static int acpi_apd_setup(struct apd_private_data *pdata)
@@ -71,6 +71,7 @@ static int acpi_apd_setup(struct apd_private_data *pdata)
 	return 0;
 	return 0;
 }
 }
 
 
+#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
 static struct apd_device_desc cz_i2c_desc = {
 static struct apd_device_desc cz_i2c_desc = {
 	.setup = acpi_apd_setup,
 	.setup = acpi_apd_setup,
 	.fixed_clk_rate = 133000000,
 	.fixed_clk_rate = 133000000,
@@ -80,6 +81,14 @@ static struct apd_device_desc cz_uart_desc = {
 	.setup = acpi_apd_setup,
 	.setup = acpi_apd_setup,
 	.fixed_clk_rate = 48000000,
 	.fixed_clk_rate = 48000000,
 };
 };
+#endif
+
+#ifdef CONFIG_ARM64
+static struct apd_device_desc xgene_i2c_desc = {
+	.setup = acpi_apd_setup,
+	.fixed_clk_rate = 100000000,
+};
+#endif
 
 
 #else
 #else
 
 
@@ -132,9 +141,14 @@ static int acpi_apd_create_device(struct acpi_device *adev,
 
 
 static const struct acpi_device_id acpi_apd_device_ids[] = {
 static const struct acpi_device_id acpi_apd_device_ids[] = {
 	/* Generic apd devices */
 	/* Generic apd devices */
+#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
 	{ "AMD0010", APD_ADDR(cz_i2c_desc) },
 	{ "AMD0010", APD_ADDR(cz_i2c_desc) },
 	{ "AMD0020", APD_ADDR(cz_uart_desc) },
 	{ "AMD0020", APD_ADDR(cz_uart_desc) },
 	{ "AMD0030", },
 	{ "AMD0030", },
+#endif
+#ifdef CONFIG_ARM64
+	{ "APMC0D0F", APD_ADDR(xgene_i2c_desc) },
+#endif
 	{ }
 	{ }
 };
 };
 
 

+ 804 - 0
drivers/acpi/acpi_dbg.c

@@ -0,0 +1,804 @@
+/*
+ * ACPI AML interfacing support
+ *
+ * Copyright (C) 2015, Intel Corporation
+ * Authors: Lv Zheng <lv.zheng@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* #define DEBUG */
+#define pr_fmt(fmt) "ACPI : AML: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
+#include <linux/circ_buf.h>
+#include <linux/acpi.h>
+#include "internal.h"
+
+#define ACPI_AML_BUF_ALIGN	(sizeof (acpi_size))
+#define ACPI_AML_BUF_SIZE	PAGE_SIZE
+
+#define circ_count(circ) \
+	(CIRC_CNT((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
+#define circ_count_to_end(circ) \
+	(CIRC_CNT_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
+#define circ_space(circ) \
+	(CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
+#define circ_space_to_end(circ) \
+	(CIRC_SPACE_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
+
+#define ACPI_AML_OPENED		0x0001
+#define ACPI_AML_CLOSED		0x0002
+#define ACPI_AML_IN_USER	0x0004 /* user space is writing cmd */
+#define ACPI_AML_IN_KERN	0x0008 /* kernel space is reading cmd */
+#define ACPI_AML_OUT_USER	0x0010 /* user space is reading log */
+#define ACPI_AML_OUT_KERN	0x0020 /* kernel space is writing log */
+#define ACPI_AML_USER		(ACPI_AML_IN_USER | ACPI_AML_OUT_USER)
+#define ACPI_AML_KERN		(ACPI_AML_IN_KERN | ACPI_AML_OUT_KERN)
+#define ACPI_AML_BUSY		(ACPI_AML_USER | ACPI_AML_KERN)
+#define ACPI_AML_OPEN		(ACPI_AML_OPENED | ACPI_AML_CLOSED)
+
+struct acpi_aml_io {
+	wait_queue_head_t wait;
+	unsigned long flags;
+	unsigned long users;
+	struct mutex lock;
+	struct task_struct *thread;
+	char out_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
+	struct circ_buf out_crc;
+	char in_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
+	struct circ_buf in_crc;
+	acpi_osd_exec_callback function;
+	void *context;
+	unsigned long usages;
+};
+
+static struct acpi_aml_io acpi_aml_io;
+static bool acpi_aml_initialized;
+static struct file *acpi_aml_active_reader;
+static struct dentry *acpi_aml_dentry;
+
+static inline bool __acpi_aml_running(void)
+{
+	return acpi_aml_io.thread ? true : false;
+}
+
+static inline bool __acpi_aml_access_ok(unsigned long flag)
+{
+	/*
+	 * The debugger interface is in opened state (OPENED && !CLOSED),
+	 * then it is allowed to access the debugger buffers from either
+	 * user space or the kernel space.
+	 * In addition, for the kernel space, only the debugger thread
+	 * (thread ID matched) is allowed to access.
+	 */
+	if (!(acpi_aml_io.flags & ACPI_AML_OPENED) ||
+	    (acpi_aml_io.flags & ACPI_AML_CLOSED) ||
+	    !__acpi_aml_running())
+		return false;
+	if ((flag & ACPI_AML_KERN) &&
+	    current != acpi_aml_io.thread)
+		return false;
+	return true;
+}
+
+static inline bool __acpi_aml_readable(struct circ_buf *circ, unsigned long flag)
+{
+	/*
+	 * Another read is not in progress and there is data in buffer
+	 * available for read.
+	 */
+	if (!(acpi_aml_io.flags & flag) && circ_count(circ))
+		return true;
+	return false;
+}
+
+static inline bool __acpi_aml_writable(struct circ_buf *circ, unsigned long flag)
+{
+	/*
+	 * Another write is not in progress and there is buffer space
+	 * available for write.
+	 */
+	if (!(acpi_aml_io.flags & flag) && circ_space(circ))
+		return true;
+	return false;
+}
+
+static inline bool __acpi_aml_busy(void)
+{
+	if (acpi_aml_io.flags & ACPI_AML_BUSY)
+		return true;
+	return false;
+}
+
+static inline bool __acpi_aml_opened(void)
+{
+	if (acpi_aml_io.flags & ACPI_AML_OPEN)
+		return true;
+	return false;
+}
+
+static inline bool __acpi_aml_used(void)
+{
+	return acpi_aml_io.usages ? true : false;
+}
+
+static inline bool acpi_aml_running(void)
+{
+	bool ret;
+
+	mutex_lock(&acpi_aml_io.lock);
+	ret = __acpi_aml_running();
+	mutex_unlock(&acpi_aml_io.lock);
+	return ret;
+}
+
+static bool acpi_aml_busy(void)
+{
+	bool ret;
+
+	mutex_lock(&acpi_aml_io.lock);
+	ret = __acpi_aml_busy();
+	mutex_unlock(&acpi_aml_io.lock);
+	return ret;
+}
+
+static bool acpi_aml_used(void)
+{
+	bool ret;
+
+	/*
+	 * The usage count is prepared to avoid race conditions between the
+	 * starts and the stops of the debugger thread.
+	 */
+	mutex_lock(&acpi_aml_io.lock);
+	ret = __acpi_aml_used();
+	mutex_unlock(&acpi_aml_io.lock);
+	return ret;
+}
+
+static bool acpi_aml_kern_readable(void)
+{
+	bool ret;
+
+	mutex_lock(&acpi_aml_io.lock);
+	ret = !__acpi_aml_access_ok(ACPI_AML_IN_KERN) ||
+	      __acpi_aml_readable(&acpi_aml_io.in_crc, ACPI_AML_IN_KERN);
+	mutex_unlock(&acpi_aml_io.lock);
+	return ret;
+}
+
+static bool acpi_aml_kern_writable(void)
+{
+	bool ret;
+
+	mutex_lock(&acpi_aml_io.lock);
+	ret = !__acpi_aml_access_ok(ACPI_AML_OUT_KERN) ||
+	      __acpi_aml_writable(&acpi_aml_io.out_crc, ACPI_AML_OUT_KERN);
+	mutex_unlock(&acpi_aml_io.lock);
+	return ret;
+}
+
+static bool acpi_aml_user_readable(void)
+{
+	bool ret;
+
+	mutex_lock(&acpi_aml_io.lock);
+	ret = !__acpi_aml_access_ok(ACPI_AML_OUT_USER) ||
+	      __acpi_aml_readable(&acpi_aml_io.out_crc, ACPI_AML_OUT_USER);
+	mutex_unlock(&acpi_aml_io.lock);
+	return ret;
+}
+
+static bool acpi_aml_user_writable(void)
+{
+	bool ret;
+
+	mutex_lock(&acpi_aml_io.lock);
+	ret = !__acpi_aml_access_ok(ACPI_AML_IN_USER) ||
+	      __acpi_aml_writable(&acpi_aml_io.in_crc, ACPI_AML_IN_USER);
+	mutex_unlock(&acpi_aml_io.lock);
+	return ret;
+}
+
+static int acpi_aml_lock_write(struct circ_buf *circ, unsigned long flag)
+{
+	int ret = 0;
+
+	mutex_lock(&acpi_aml_io.lock);
+	if (!__acpi_aml_access_ok(flag)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	if (!__acpi_aml_writable(circ, flag)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	acpi_aml_io.flags |= flag;
+out:
+	mutex_unlock(&acpi_aml_io.lock);
+	return ret;
+}
+
+static int acpi_aml_lock_read(struct circ_buf *circ, unsigned long flag)
+{
+	int ret = 0;
+
+	mutex_lock(&acpi_aml_io.lock);
+	if (!__acpi_aml_access_ok(flag)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	if (!__acpi_aml_readable(circ, flag)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	acpi_aml_io.flags |= flag;
+out:
+	mutex_unlock(&acpi_aml_io.lock);
+	return ret;
+}
+
+static void acpi_aml_unlock_fifo(unsigned long flag, bool wakeup)
+{
+	mutex_lock(&acpi_aml_io.lock);
+	acpi_aml_io.flags &= ~flag;
+	if (wakeup)
+		wake_up_interruptible(&acpi_aml_io.wait);
+	mutex_unlock(&acpi_aml_io.lock);
+}
+
+static int acpi_aml_write_kern(const char *buf, int len)
+{
+	int ret;
+	struct circ_buf *crc = &acpi_aml_io.out_crc;
+	int n;
+	char *p;
+
+	ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
+	if (IS_ERR_VALUE(ret))
+		return ret;
+	/* sync tail before inserting logs */
+	smp_mb();
+	p = &crc->buf[crc->head];
+	n = min(len, circ_space_to_end(crc));
+	memcpy(p, buf, n);
+	/* sync head after inserting logs */
+	smp_wmb();
+	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
+	acpi_aml_unlock_fifo(ACPI_AML_OUT_KERN, true);
+	return n;
+}
+
+static int acpi_aml_readb_kern(void)
+{
+	int ret;
+	struct circ_buf *crc = &acpi_aml_io.in_crc;
+	char *p;
+
+	ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
+	if (IS_ERR_VALUE(ret))
+		return ret;
+	/* sync head before removing cmds */
+	smp_rmb();
+	p = &crc->buf[crc->tail];
+	ret = (int)*p;
+	/* sync tail before inserting cmds */
+	smp_mb();
+	crc->tail = (crc->tail + 1) & (ACPI_AML_BUF_SIZE - 1);
+	acpi_aml_unlock_fifo(ACPI_AML_IN_KERN, true);
+	return ret;
+}
+
+/*
+ * acpi_aml_write_log() - Capture debugger output
+ * @msg: the debugger output
+ *
+ * This function should be used to implement acpi_os_printf() to filter out
+ * the debugger output and store the output into the debugger interface
+ * buffer. Return the size of stored logs or errno.
+ */
+static ssize_t acpi_aml_write_log(const char *msg)
+{
+	int ret = 0;
+	int count = 0, size = 0;
+
+	if (!acpi_aml_initialized)
+		return -ENODEV;
+	if (msg)
+		count = strlen(msg);
+	while (count > 0) {
+again:
+		ret = acpi_aml_write_kern(msg + size, count);
+		if (ret == -EAGAIN) {
+			ret = wait_event_interruptible(acpi_aml_io.wait,
+				acpi_aml_kern_writable());
+			/*
+			 * We need to retry when the condition
+			 * becomes true.
+			 */
+			if (ret == 0)
+				goto again;
+			break;
+		}
+		if (IS_ERR_VALUE(ret))
+			break;
+		size += ret;
+		count -= ret;
+	}
+	return size > 0 ? size : ret;
+}
+
+/*
+ * acpi_aml_read_cmd() - Capture debugger input
+ * @msg: the debugger input
+ * @size: the size of the debugger input
+ *
+ * This function should be used to implement acpi_os_get_line() to capture
+ * the debugger input commands and store the input commands into the
+ * debugger interface buffer. Return the size of stored commands or errno.
+ */
+static ssize_t acpi_aml_read_cmd(char *msg, size_t count)
+{
+	int ret = 0;
+	int size = 0;
+
+	/*
+	 * This is ensured by the running fact of the debugger thread
+	 * unless a bug is introduced.
+	 */
+	BUG_ON(!acpi_aml_initialized);
+	while (count > 0) {
+again:
+		/*
+		 * Check each input byte to find the end of the command.
+		 */
+		ret = acpi_aml_readb_kern();
+		if (ret == -EAGAIN) {
+			ret = wait_event_interruptible(acpi_aml_io.wait,
+				acpi_aml_kern_readable());
+			/*
+			 * We need to retry when the condition becomes
+			 * true.
+			 */
+			if (ret == 0)
+				goto again;
+		}
+		if (IS_ERR_VALUE(ret))
+			break;
+		*(msg + size) = (char)ret;
+		size++;
+		count--;
+		if (ret == '\n') {
+			/*
+			 * acpi_os_get_line() requires a zero terminated command
+			 * string.
+			 */
+			*(msg + size - 1) = '\0';
+			break;
+		}
+	}
+	return size > 0 ? size : ret;
+}
+
+static int acpi_aml_thread(void *unsed)
+{
+	acpi_osd_exec_callback function = NULL;
+	void *context;
+
+	mutex_lock(&acpi_aml_io.lock);
+	if (acpi_aml_io.function) {
+		acpi_aml_io.usages++;
+		function = acpi_aml_io.function;
+		context = acpi_aml_io.context;
+	}
+	mutex_unlock(&acpi_aml_io.lock);
+
+	if (function)
+		function(context);
+
+	mutex_lock(&acpi_aml_io.lock);
+	acpi_aml_io.usages--;
+	if (!__acpi_aml_used()) {
+		acpi_aml_io.thread = NULL;
+		wake_up(&acpi_aml_io.wait);
+	}
+	mutex_unlock(&acpi_aml_io.lock);
+
+	return 0;
+}
+
+/*
+ * acpi_aml_create_thread() - Create AML debugger thread
+ * @function: the debugger thread callback
+ * @context: the context to be passed to the debugger thread
+ *
+ * This function should be used to implement acpi_os_execute() which is
+ * used by the ACPICA debugger to create the debugger thread.
+ */
+static int acpi_aml_create_thread(acpi_osd_exec_callback function, void *context)
+{
+	struct task_struct *t;
+
+	mutex_lock(&acpi_aml_io.lock);
+	acpi_aml_io.function = function;
+	acpi_aml_io.context = context;
+	mutex_unlock(&acpi_aml_io.lock);
+
+	t = kthread_create(acpi_aml_thread, NULL, "aml");
+	if (IS_ERR(t)) {
+		pr_err("Failed to create AML debugger thread.\n");
+		return PTR_ERR(t);
+	}
+
+	mutex_lock(&acpi_aml_io.lock);
+	acpi_aml_io.thread = t;
+	acpi_set_debugger_thread_id((acpi_thread_id)(unsigned long)t);
+	wake_up_process(t);
+	mutex_unlock(&acpi_aml_io.lock);
+	return 0;
+}
+
+static int acpi_aml_wait_command_ready(bool single_step,
+				       char *buffer, size_t length)
+{
+	acpi_status status;
+
+	if (single_step)
+		acpi_os_printf("\n%1c ", ACPI_DEBUGGER_EXECUTE_PROMPT);
+	else
+		acpi_os_printf("\n%1c ", ACPI_DEBUGGER_COMMAND_PROMPT);
+
+	status = acpi_os_get_line(buffer, length, NULL);
+	if (ACPI_FAILURE(status))
+		return -EINVAL;
+	return 0;
+}
+
+static int acpi_aml_notify_command_complete(void)
+{
+	return 0;
+}
+
+static int acpi_aml_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	acpi_status status;
+
+	mutex_lock(&acpi_aml_io.lock);
+	/*
+	 * The debugger interface is being closed, no new user is allowed
+	 * during this period.
+	 */
+	if (acpi_aml_io.flags & ACPI_AML_CLOSED) {
+		ret = -EBUSY;
+		goto err_lock;
+	}
+	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
+		/*
+		 * Only one reader is allowed to initiate the debugger
+		 * thread.
+		 */
+		if (acpi_aml_active_reader) {
+			ret = -EBUSY;
+			goto err_lock;
+		} else {
+			pr_debug("Opening debugger reader.\n");
+			acpi_aml_active_reader = file;
+		}
+	} else {
+		/*
+		 * No writer is allowed unless the debugger thread is
+		 * ready.
+		 */
+		if (!(acpi_aml_io.flags & ACPI_AML_OPENED)) {
+			ret = -ENODEV;
+			goto err_lock;
+		}
+	}
+	if (acpi_aml_active_reader == file) {
+		pr_debug("Opening debugger interface.\n");
+		mutex_unlock(&acpi_aml_io.lock);
+
+		pr_debug("Initializing debugger thread.\n");
+		status = acpi_initialize_debugger();
+		if (ACPI_FAILURE(status)) {
+			pr_err("Failed to initialize debugger.\n");
+			ret = -EINVAL;
+			goto err_exit;
+		}
+		pr_debug("Debugger thread initialized.\n");
+
+		mutex_lock(&acpi_aml_io.lock);
+		acpi_aml_io.flags |= ACPI_AML_OPENED;
+		acpi_aml_io.out_crc.head = acpi_aml_io.out_crc.tail = 0;
+		acpi_aml_io.in_crc.head = acpi_aml_io.in_crc.tail = 0;
+		pr_debug("Debugger interface opened.\n");
+	}
+	acpi_aml_io.users++;
+err_lock:
+	if (IS_ERR_VALUE(ret)) {
+		if (acpi_aml_active_reader == file)
+			acpi_aml_active_reader = NULL;
+	}
+	mutex_unlock(&acpi_aml_io.lock);
+err_exit:
+	return ret;
+}
+
+static int acpi_aml_release(struct inode *inode, struct file *file)
+{
+	mutex_lock(&acpi_aml_io.lock);
+	acpi_aml_io.users--;
+	if (file == acpi_aml_active_reader) {
+		pr_debug("Closing debugger reader.\n");
+		acpi_aml_active_reader = NULL;
+
+		pr_debug("Closing debugger interface.\n");
+		acpi_aml_io.flags |= ACPI_AML_CLOSED;
+
+		/*
+		 * Wake up all user space/kernel space blocked
+		 * readers/writers.
+		 */
+		wake_up_interruptible(&acpi_aml_io.wait);
+		mutex_unlock(&acpi_aml_io.lock);
+		/*
+		 * Wait all user space/kernel space readers/writers to
+		 * stop so that ACPICA command loop of the debugger thread
+		 * should fail all its command line reads after this point.
+		 */
+		wait_event(acpi_aml_io.wait, !acpi_aml_busy());
+
+		/*
+		 * Then we try to terminate the debugger thread if it is
+		 * not terminated.
+		 */
+		pr_debug("Terminating debugger thread.\n");
+		acpi_terminate_debugger();
+		wait_event(acpi_aml_io.wait, !acpi_aml_used());
+		pr_debug("Debugger thread terminated.\n");
+
+		mutex_lock(&acpi_aml_io.lock);
+		acpi_aml_io.flags &= ~ACPI_AML_OPENED;
+	}
+	if (acpi_aml_io.users == 0) {
+		pr_debug("Debugger interface closed.\n");
+		acpi_aml_io.flags &= ~ACPI_AML_CLOSED;
+	}
+	mutex_unlock(&acpi_aml_io.lock);
+	return 0;
+}
+
+static int acpi_aml_read_user(char __user *buf, int len)
+{
+	int ret;
+	struct circ_buf *crc = &acpi_aml_io.out_crc;
+	int n;
+	char *p;
+
+	ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
+	if (IS_ERR_VALUE(ret))
+		return ret;
+	/* sync head before removing logs */
+	smp_rmb();
+	p = &crc->buf[crc->tail];
+	n = min(len, circ_count_to_end(crc));
+	if (copy_to_user(buf, p, n)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	/* sync tail after removing logs */
+	smp_mb();
+	crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
+	ret = n;
+out:
+	acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !IS_ERR_VALUE(ret));
+	return ret;
+}
+
+static ssize_t acpi_aml_read(struct file *file, char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	int ret = 0;
+	int size = 0;
+
+	if (!count)
+		return 0;
+	if (!access_ok(VERIFY_WRITE, buf, count))
+		return -EFAULT;
+
+	while (count > 0) {
+again:
+		ret = acpi_aml_read_user(buf + size, count);
+		if (ret == -EAGAIN) {
+			if (file->f_flags & O_NONBLOCK)
+				break;
+			else {
+				ret = wait_event_interruptible(acpi_aml_io.wait,
+					acpi_aml_user_readable());
+				/*
+				 * We need to retry when the condition
+				 * becomes true.
+				 */
+				if (ret == 0)
+					goto again;
+			}
+		}
+		if (IS_ERR_VALUE(ret)) {
+			if (!acpi_aml_running())
+				ret = 0;
+			break;
+		}
+		if (ret) {
+			size += ret;
+			count -= ret;
+			*ppos += ret;
+			break;
+		}
+	}
+	return size > 0 ? size : ret;
+}
+
+static int acpi_aml_write_user(const char __user *buf, int len)
+{
+	int ret;
+	struct circ_buf *crc = &acpi_aml_io.in_crc;
+	int n;
+	char *p;
+
+	ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
+	if (IS_ERR_VALUE(ret))
+		return ret;
+	/* sync tail before inserting cmds */
+	smp_mb();
+	p = &crc->buf[crc->head];
+	n = min(len, circ_space_to_end(crc));
+	if (copy_from_user(p, buf, n)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	/* sync head after inserting cmds */
+	smp_wmb();
+	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
+	ret = n;
+out:
+	acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !IS_ERR_VALUE(ret));
+	return n;
+}
+
+static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	int ret = 0;
+	int size = 0;
+
+	if (!count)
+		return 0;
+	if (!access_ok(VERIFY_READ, buf, count))
+		return -EFAULT;
+
+	while (count > 0) {
+again:
+		ret = acpi_aml_write_user(buf + size, count);
+		if (ret == -EAGAIN) {
+			if (file->f_flags & O_NONBLOCK)
+				break;
+			else {
+				ret = wait_event_interruptible(acpi_aml_io.wait,
+					acpi_aml_user_writable());
+				/*
+				 * We need to retry when the condition
+				 * becomes true.
+				 */
+				if (ret == 0)
+					goto again;
+			}
+		}
+		if (IS_ERR_VALUE(ret)) {
+			if (!acpi_aml_running())
+				ret = 0;
+			break;
+		}
+		if (ret) {
+			size += ret;
+			count -= ret;
+			*ppos += ret;
+		}
+	}
+	return size > 0 ? size : ret;
+}
+
+static unsigned int acpi_aml_poll(struct file *file, poll_table *wait)
+{
+	int masks = 0;
+
+	poll_wait(file, &acpi_aml_io.wait, wait);
+	if (acpi_aml_user_readable())
+		masks |= POLLIN | POLLRDNORM;
+	if (acpi_aml_user_writable())
+		masks |= POLLOUT | POLLWRNORM;
+
+	return masks;
+}
+
+static const struct file_operations acpi_aml_operations = {
+	.read		= acpi_aml_read,
+	.write		= acpi_aml_write,
+	.poll		= acpi_aml_poll,
+	.open		= acpi_aml_open,
+	.release	= acpi_aml_release,
+	.llseek		= generic_file_llseek,
+};
+
+static const struct acpi_debugger_ops acpi_aml_debugger = {
+	.create_thread		 = acpi_aml_create_thread,
+	.read_cmd		 = acpi_aml_read_cmd,
+	.write_log		 = acpi_aml_write_log,
+	.wait_command_ready	 = acpi_aml_wait_command_ready,
+	.notify_command_complete = acpi_aml_notify_command_complete,
+};
+
+int __init acpi_aml_init(void)
+{
+	int ret = 0;
+
+	if (!acpi_debugfs_dir) {
+		ret = -ENOENT;
+		goto err_exit;
+	}
+
+	/* Initialize AML IO interface */
+	mutex_init(&acpi_aml_io.lock);
+	init_waitqueue_head(&acpi_aml_io.wait);
+	acpi_aml_io.out_crc.buf = acpi_aml_io.out_buf;
+	acpi_aml_io.in_crc.buf = acpi_aml_io.in_buf;
+	acpi_aml_dentry = debugfs_create_file("acpidbg",
+					      S_IFREG | S_IRUGO | S_IWUSR,
+					      acpi_debugfs_dir, NULL,
+					      &acpi_aml_operations);
+	if (acpi_aml_dentry == NULL) {
+		ret = -ENODEV;
+		goto err_exit;
+	}
+	ret = acpi_register_debugger(THIS_MODULE, &acpi_aml_debugger);
+	if (ret)
+		goto err_fs;
+	acpi_aml_initialized = true;
+
+err_fs:
+	if (ret) {
+		debugfs_remove(acpi_aml_dentry);
+		acpi_aml_dentry = NULL;
+	}
+err_exit:
+	return ret;
+}
+
+void __exit acpi_aml_exit(void)
+{
+	if (acpi_aml_initialized) {
+		acpi_unregister_debugger(&acpi_aml_debugger);
+		if (acpi_aml_dentry) {
+			debugfs_remove(acpi_aml_dentry);
+			acpi_aml_dentry = NULL;
+		}
+		acpi_aml_initialized = false;
+	}
+}
+
+module_init(acpi_aml_init);
+module_exit(acpi_aml_exit);
+
+MODULE_AUTHOR("Lv Zheng");
+MODULE_DESCRIPTION("ACPI debugger userspace IO driver");
+MODULE_LICENSE("GPL");

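The acpi_aml_read_user()/acpi_aml_write_user() paths above move data through struct circ_buf rings whose size is a power of two, so wrap-around is a simple mask and each side only advances its own index (head for the producer, tail for the consumer) after the copy. A minimal stand-alone sketch of that indexing, with assumed names, a fixed size and no memory barriers (an illustration, not the driver code):

#include <string.h>

#define BUF_SIZE 4096			/* must be a power of two */

struct ring {
	char buf[BUF_SIZE];
	unsigned int head;		/* producer index */
	unsigned int tail;		/* consumer index */
};

/* bytes readable without wrapping, cf. circ_count_to_end() above */
static unsigned int ring_count_to_end(const struct ring *r)
{
	unsigned int end = BUF_SIZE - r->tail;
	unsigned int n = (r->head + end) & (BUF_SIZE - 1);

	return n < end ? n : end;
}

/* drain up to len bytes, advancing tail with the power-of-two mask */
static unsigned int ring_read(struct ring *r, char *dst, unsigned int len)
{
	unsigned int n = ring_count_to_end(r);

	if (n > len)
		n = len;
	memcpy(dst, &r->buf[r->tail], n);
	r->tail = (r->tail + n) & (BUF_SIZE - 1);
	return n;
}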
+ 198 - 15
drivers/acpi/acpi_lpss.c

@@ -15,6 +15,7 @@
 #include <linux/clk-provider.h>
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/mutex.h>
 #include <linux/platform_device.h>
 #include <linux/platform_data/clk-lpss.h>
 #include <linux/pm_runtime.h>
@@ -26,6 +27,10 @@ ACPI_MODULE_NAME("acpi_lpss");

 #ifdef CONFIG_X86_INTEL_LPSS

+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
+#include <asm/pmc_atom.h>
+
 #define LPSS_ADDR(desc) ((unsigned long)&desc)

 #define LPSS_CLK_SIZE	0x04
@@ -71,7 +76,7 @@ struct lpss_device_desc {
 	void (*setup)(struct lpss_private_data *pdata);
 };

-static struct lpss_device_desc lpss_dma_desc = {
+static const struct lpss_device_desc lpss_dma_desc = {
 	.flags = LPSS_CLK,
 };

@@ -84,6 +89,23 @@ struct lpss_private_data {
 	u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
 };

+/* LPSS run time quirks */
+static unsigned int lpss_quirks;
+
+/*
+ * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device.
+ *
+ * The LPSS DMA controller has neither _PS0 nor _PS3 method. Moreover
+ * it can be powered off automatically whenever the last LPSS device goes down.
+ * In case of no power any access to the DMA controller will hang the system.
+ * The behaviour is reproduced on some HP laptops based on Intel BayTrail as
+ * well as on ASuS T100TA transformer.
+ *
+ * This quirk overrides power state of entire LPSS island to keep DMA powered
+ * on whenever we have at least one other device in use.
+ */
+#define LPSS_QUIRK_ALWAYS_POWER_ON	BIT(0)
+
 /* UART Component Parameter Register */
 #define LPSS_UART_CPR			0xF4
 #define LPSS_UART_CPR_AFCE		BIT(4)
@@ -196,13 +218,21 @@ static const struct lpss_device_desc bsw_i2c_dev_desc = {
 	.setup = byt_i2c_setup,
 };

-static struct lpss_device_desc bsw_spi_dev_desc = {
+static const struct lpss_device_desc bsw_spi_dev_desc = {
 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
 			| LPSS_NO_D3_DELAY,
 	.prv_offset = 0x400,
 	.setup = lpss_deassert_reset,
 };

+#define ICPU(model)	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+
+static const struct x86_cpu_id lpss_cpu_ids[] = {
+	ICPU(0x37),	/* Valleyview, Bay Trail */
+	ICPU(0x4c),	/* Braswell, Cherry Trail */
+	{}
+};
+
 #else

 #define LPSS_ADDR(desc) (0UL)
@@ -574,6 +604,17 @@ static void acpi_lpss_restore_ctx(struct device *dev,
 {
 	unsigned int i;

+	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
+		unsigned long offset = i * sizeof(u32);
+
+		__lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
+		dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
+			pdata->prv_reg_ctx[i], offset);
+	}
+}
+
+static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata)
+{
 	/*
 	 * The following delay is needed or the subsequent write operations may
 	 * fail. The LPSS devices are actually PCI devices and the PCI spec
@@ -586,14 +627,34 @@ static void acpi_lpss_restore_ctx(struct device *dev,
 		delay = 0;

 	msleep(delay);
+}

-	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
-		unsigned long offset = i * sizeof(u32);
+static int acpi_lpss_activate(struct device *dev)
+{
+	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+	int ret;
 
 
-		__lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
-		dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
-			pdata->prv_reg_ctx[i], offset);
-	}
+	ret = acpi_dev_runtime_resume(dev);
+	if (ret)
+		return ret;
+
+	acpi_lpss_d3_to_d0_delay(pdata);
+
+	/*
+	 * This is called only on ->probe() stage where a device is either in
+	 * known state defined by BIOS or most likely powered off. Due to this
+	 * we have to deassert reset line to be sure that ->probe() will
+	 * recognize the device.
+	 */
+	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
+		lpss_deassert_reset(pdata);
+
+	return 0;
+}
+
+static void acpi_lpss_dismiss(struct device *dev)
+{
+	acpi_dev_runtime_suspend(dev);
 }

 #ifdef CONFIG_PM_SLEEP
@@ -621,6 +682,8 @@ static int acpi_lpss_resume_early(struct device *dev)
 	if (ret)
 		return ret;

+	acpi_lpss_d3_to_d0_delay(pdata);
+
 	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
 		acpi_lpss_restore_ctx(dev, pdata);

@@ -628,6 +691,89 @@ static int acpi_lpss_resume_early(struct device *dev)
 }
 #endif /* CONFIG_PM_SLEEP */

+/* IOSF SB for LPSS island */
+#define LPSS_IOSF_UNIT_LPIOEP		0xA0
+#define LPSS_IOSF_UNIT_LPIO1		0xAB
+#define LPSS_IOSF_UNIT_LPIO2		0xAC
+
+#define LPSS_IOSF_PMCSR			0x84
+#define LPSS_PMCSR_D0			0
+#define LPSS_PMCSR_D3hot		3
+#define LPSS_PMCSR_Dx_MASK		GENMASK(1, 0)
+
+#define LPSS_IOSF_GPIODEF0		0x154
+#define LPSS_GPIODEF0_DMA1_D3		BIT(2)
+#define LPSS_GPIODEF0_DMA2_D3		BIT(3)
+#define LPSS_GPIODEF0_DMA_D3_MASK	GENMASK(3, 2)
+
+static DEFINE_MUTEX(lpss_iosf_mutex);
+
+static void lpss_iosf_enter_d3_state(void)
+{
+	u32 value1 = 0;
+	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK;
+	u32 value2 = LPSS_PMCSR_D3hot;
+	u32 mask2 = LPSS_PMCSR_Dx_MASK;
+	/*
+	 * PMC provides an information about actual status of the LPSS devices.
+	 * Here we read the values related to LPSS power island, i.e. LPSS
+	 * devices, excluding both LPSS DMA controllers, along with SCC domain.
+	 */
+	u32 func_dis, d3_sts_0, pmc_status, pmc_mask = 0xfe000ffe;
+	int ret;
+
+	ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
+	if (ret)
+		return;
+
+	mutex_lock(&lpss_iosf_mutex);
+
+	ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
+	if (ret)
+		goto exit;
+
+	/*
+	 * Get the status of entire LPSS power island per device basis.
+	 * Shutdown both LPSS DMA controllers if and only if all other devices
+	 * are already in D3hot.
+	 */
+	pmc_status = (~(d3_sts_0 | func_dis)) & pmc_mask;
+	if (pmc_status)
+		goto exit;
+
+	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
+			LPSS_IOSF_PMCSR, value2, mask2);
+
+	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
+			LPSS_IOSF_PMCSR, value2, mask2);
+
+	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
+			LPSS_IOSF_GPIODEF0, value1, mask1);
+exit:
+	mutex_unlock(&lpss_iosf_mutex);
+}
+
+static void lpss_iosf_exit_d3_state(void)
+{
+	u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3;
+	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK;
+	u32 value2 = LPSS_PMCSR_D0;
+	u32 mask2 = LPSS_PMCSR_Dx_MASK;
+
+	mutex_lock(&lpss_iosf_mutex);
+
+	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
+			LPSS_IOSF_GPIODEF0, value1, mask1);
+
+	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
+			LPSS_IOSF_PMCSR, value2, mask2);
+
+	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
+			LPSS_IOSF_PMCSR, value2, mask2);
+
+	mutex_unlock(&lpss_iosf_mutex);
+}
+
 static int acpi_lpss_runtime_suspend(struct device *dev)
 {
 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
@@ -640,7 +786,17 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
 	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
 		acpi_lpss_save_ctx(dev, pdata);

-	return acpi_dev_runtime_suspend(dev);
+	ret = acpi_dev_runtime_suspend(dev);
+
+	/*
+	 * This call must be last in the sequence, otherwise PMC will return
+	 * wrong status for devices being about to be powered off. See
+	 * lpss_iosf_enter_d3_state() for further information.
+	 */
+	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+		lpss_iosf_enter_d3_state();
+
+	return ret;
 }

 static int acpi_lpss_runtime_resume(struct device *dev)
@@ -648,10 +804,19 @@ static int acpi_lpss_runtime_resume(struct device *dev)
 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
 	int ret;

+	/*
+	 * This call is kept first to be in symmetry with
+	 * acpi_lpss_runtime_suspend() one.
+	 */
+	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+		lpss_iosf_exit_d3_state();
+
 	ret = acpi_dev_runtime_resume(dev);
 	if (ret)
 		return ret;

+	acpi_lpss_d3_to_d0_delay(pdata);
+
 	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
 		acpi_lpss_restore_ctx(dev, pdata);

@@ -660,6 +825,10 @@ static int acpi_lpss_runtime_resume(struct device *dev)
 #endif /* CONFIG_PM */

 static struct dev_pm_domain acpi_lpss_pm_domain = {
+#ifdef CONFIG_PM
+	.activate = acpi_lpss_activate,
+	.dismiss = acpi_lpss_dismiss,
+#endif
 	.ops = {
 #ifdef CONFIG_PM
 #ifdef CONFIG_PM_SLEEP
@@ -705,8 +874,14 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb,
 	}

 	switch (action) {
-	case BUS_NOTIFY_ADD_DEVICE:
+	case BUS_NOTIFY_BIND_DRIVER:
 		pdev->dev.pm_domain = &acpi_lpss_pm_domain;
+		break;
+	case BUS_NOTIFY_DRIVER_NOT_BOUND:
+	case BUS_NOTIFY_UNBOUND_DRIVER:
+		pdev->dev.pm_domain = NULL;
+		break;
+	case BUS_NOTIFY_ADD_DEVICE:
 		if (pdata->dev_desc->flags & LPSS_LTR)
 			return sysfs_create_group(&pdev->dev.kobj,
 						  &lpss_attr_group);
@@ -714,7 +889,6 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb,
 	case BUS_NOTIFY_DEL_DEVICE:
 		if (pdata->dev_desc->flags & LPSS_LTR)
 			sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
-		pdev->dev.pm_domain = NULL;
 		break;
 	default:
 		break;
@@ -754,10 +928,19 @@ static struct acpi_scan_handler lpss_handler = {

 void __init acpi_lpss_init(void)
 {
-	if (!lpt_clk_init()) {
-		bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
-		acpi_scan_add_handler(&lpss_handler);
-	}
+	const struct x86_cpu_id *id;
+	int ret;
+
+	ret = lpt_clk_init();
+	if (ret)
+		return;
+
+	id = x86_match_cpu(lpss_cpu_ids);
+	if (id)
+		lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON;
+
+	bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
+	acpi_scan_add_handler(&lpss_handler);
 }

 #else

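lpss_iosf_enter_d3_state() above only allows the island to be powered off when every LPSS device covered by pmc_mask is already down, where "down" means either in D3hot (a set bit in PMC_D3_STS_0) or function-disabled (a set bit in PMC_FUNC_DIS). A stand-alone illustration of that bit test; the register values below are invented for the example, not read from hardware:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pmc_mask = 0xfe000ffe;	/* LPSS devices minus the two DMA controllers */
	uint32_t d3_sts_0 = 0xfe000ff0;	/* example: most devices report D3hot */
	uint32_t func_dis = 0x0000000e;	/* example: the remaining ones are disabled */

	/* bits still set here are devices neither in D3hot nor disabled */
	uint32_t still_active = ~(d3_sts_0 | func_dis) & pmc_mask;

	if (still_active)
		printf("keep the island on, active mask 0x%08x\n", still_active);
	else
		printf("all covered devices are down, D3 entry is allowed\n");
	return 0;
}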
+ 1 - 1
drivers/acpi/acpi_pnp.c

@@ -367,7 +367,7 @@ static struct acpi_scan_handler acpi_pnp_handler = {
  */
 static int is_cmos_rtc_device(struct acpi_device *adev)
 {
-	struct acpi_device_id ids[] = {
+	static const struct acpi_device_id ids[] = {
 	{ "PNP0B00" },
 	{ "PNP0B01" },
 	{ "PNP0B02" },

+ 61 - 15
drivers/acpi/acpi_video.c

@@ -77,14 +77,21 @@ module_param(allow_duplicates, bool, 0644);
 static int disable_backlight_sysfs_if = -1;
 module_param(disable_backlight_sysfs_if, int, 0444);

+#define REPORT_OUTPUT_KEY_EVENTS		0x01
+#define REPORT_BRIGHTNESS_KEY_EVENTS		0x02
+static int report_key_events = -1;
+module_param(report_key_events, int, 0644);
+MODULE_PARM_DESC(report_key_events,
+	"0: none, 1: output changes, 2: brightness changes, 3: all");
+
 static bool device_id_scheme = false;
 module_param(device_id_scheme, bool, 0444);

 static bool only_lcd = false;
 module_param(only_lcd, bool, 0444);

-static int register_count;
-static DEFINE_MUTEX(register_count_mutex);
+static DECLARE_COMPLETION(register_done);
+static DEFINE_MUTEX(register_done_mutex);
 static struct mutex video_list_lock;
 static struct list_head video_bus_head;
 static int acpi_video_bus_add(struct acpi_device *device);
@@ -412,6 +419,13 @@ static int video_enable_only_lcd(const struct dmi_system_id *d)
 	return 0;
 }

+static int video_set_report_key_events(const struct dmi_system_id *id)
+{
+	if (report_key_events == -1)
+		report_key_events = (uintptr_t)id->driver_data;
+	return 0;
+}
+
 static struct dmi_system_id video_dmi_table[] = {
 	/*
 	 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
@@ -500,6 +514,24 @@ static struct dmi_system_id video_dmi_table[] = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile M9410"),
 		},
 	},
+	/*
+	 * Some machines report wrong key events on the acpi-bus, suppress
+	 * key event reporting on these.  Note this is only intended to work
+	 * around events which are plain wrong. In some cases we get double
+	 * events, in this case acpi-video is considered the canonical source
+	 * and the events from the other source should be filtered. E.g.
+	 * by calling acpi_video_handles_brightness_key_presses() from the
+	 * vendor acpi/wmi driver or by using /lib/udev/hwdb.d/60-keyboard.hwdb
+	 */
+	{
+	 .callback = video_set_report_key_events,
+	 .driver_data = (void *)((uintptr_t)REPORT_OUTPUT_KEY_EVENTS),
+	 .ident = "Dell Vostro V131",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
+		},
+	},
 	{}
 };

@@ -1480,7 +1512,7 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
 		/* Something vetoed the keypress. */
 		keycode = 0;

-	if (keycode) {
+	if (keycode && (report_key_events & REPORT_OUTPUT_KEY_EVENTS)) {
 		input_report_key(input, keycode, 1);
 		input_sync(input);
 		input_report_key(input, keycode, 0);
@@ -1544,7 +1576,7 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)

 	acpi_notifier_call_chain(device, event, 0);

-	if (keycode) {
+	if (keycode && (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS)) {
 		input_report_key(input, keycode, 1);
 		input_sync(input);
 		input_report_key(input, keycode, 0);
@@ -2017,8 +2049,8 @@ int acpi_video_register(void)
 {
 	int ret = 0;

-	mutex_lock(&register_count_mutex);
-	if (register_count) {
+	mutex_lock(&register_done_mutex);
+	if (completion_done(&register_done)) {
 		/*
 		 * if the function of acpi_video_register is already called,
 		 * don't register the acpi_vide_bus again and return no error.
@@ -2039,22 +2071,22 @@ int acpi_video_register(void)
 	 * When the acpi_video_bus is loaded successfully, increase
 	 * the counter reference.
 	 */
-	register_count = 1;
+	complete(&register_done);

 leave:
-	mutex_unlock(&register_count_mutex);
+	mutex_unlock(&register_done_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(acpi_video_register);

 void acpi_video_unregister(void)
 {
-	mutex_lock(&register_count_mutex);
-	if (register_count) {
+	mutex_lock(&register_done_mutex);
+	if (completion_done(&register_done)) {
 		acpi_bus_unregister_driver(&acpi_video_bus);
-		register_count = 0;
+		reinit_completion(&register_done);
 	}
-	mutex_unlock(&register_count_mutex);
+	mutex_unlock(&register_done_mutex);
 }
 EXPORT_SYMBOL(acpi_video_unregister);

@@ -2062,15 +2094,29 @@ void acpi_video_unregister_backlight(void)
 {
 	struct acpi_video_bus *video;

-	mutex_lock(&register_count_mutex);
-	if (register_count) {
+	mutex_lock(&register_done_mutex);
+	if (completion_done(&register_done)) {
 		mutex_lock(&video_list_lock);
 		list_for_each_entry(video, &video_bus_head, entry)
 			acpi_video_bus_unregister_backlight(video);
 		mutex_unlock(&video_list_lock);
 	}
-	mutex_unlock(&register_count_mutex);
+	mutex_unlock(&register_done_mutex);
+}
+
+bool acpi_video_handles_brightness_key_presses(void)
+{
+	bool have_video_busses;
+
+	wait_for_completion(&register_done);
+	mutex_lock(&video_list_lock);
+	have_video_busses = !list_empty(&video_bus_head);
+	mutex_unlock(&video_list_lock);
+
+	return have_video_busses &&
+	       (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS);
 }
+EXPORT_SYMBOL(acpi_video_handles_brightness_key_presses);

 /*
  * This is kind of nasty. Hardware using Intel chipsets may require

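The new report_key_events parameter is a bitmask (bit 0 gates output-switch key events, bit 1 gates brightness key events), and acpi_video_handles_brightness_key_presses() exposes the brightness half of it to other drivers. A hedged sketch of how a vendor hotkey driver might consult the helper to drop duplicate brightness events; the surrounding driver context is assumed, only the exported helper comes from this series:

#include <linux/input.h>
#include <acpi/video.h>

static void vendor_report_hotkey(struct input_dev *input, unsigned int keycode)
{
	/* acpi_video is treated as the canonical source for these keys */
	if ((keycode == KEY_BRIGHTNESSUP || keycode == KEY_BRIGHTNESSDOWN) &&
	    acpi_video_handles_brightness_key_presses())
		return;

	input_report_key(input, keycode, 1);
	input_sync(input);
	input_report_key(input, keycode, 0);
	input_sync(input);
}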
+ 2 - 2
drivers/acpi/acpica/Makefile

@@ -50,6 +50,7 @@ acpi-y +=		\
 	exdump.o	\
 	exfield.o	\
 	exfldio.o	\
+	exmisc.o	\
 	exmutex.o	\
 	exnames.o	\
 	exoparg1.o	\
@@ -57,7 +58,6 @@ acpi-y +=		\
 	exoparg3.o	\
 	exoparg6.o	\
 	exprep.o	\
-	exmisc.o	\
 	exregion.o	\
 	exresnte.o	\
 	exresolv.o	\
@@ -66,6 +66,7 @@ acpi-y +=		\
 	exstoren.o	\
 	exstorob.o	\
 	exsystem.o	\
+	extrace.o	\
 	exutils.o

 acpi-y +=		\
@@ -196,7 +197,6 @@ acpi-$(ACPI_FUTURE_USAGE) +=	\
 	dbfileio.o		\
 	dbtest.o		\
 	utcache.o		\
-	utfileio.o		\
 	utprint.o		\
 	uttrack.o		\
 	utuuid.o

+ 32 - 26
drivers/acpi/acpica/acapps.h

@@ -44,6 +44,8 @@
 #ifndef _ACAPPS
 #define _ACAPPS

+#include <stdio.h>
+
 /* Common info for tool signons */

 #define ACPICA_NAME                 "Intel ACPI Component Architecture"
@@ -85,11 +87,40 @@
 	acpi_os_printf (description);

 #define ACPI_OPTION(name, description) \
-	acpi_os_printf (" %-18s%s\n", name, description);
+	acpi_os_printf (" %-20s%s\n", name, description);
+
+/* Check for unexpected exceptions */
+
+#define ACPI_CHECK_STATUS(name, status, expected) \
+	if (status != expected) \
+	{ \
+		acpi_os_printf ("Unexpected %s from %s (%s-%d)\n", \
+			acpi_format_exception (status), #name, _acpi_module_name, __LINE__); \
+	}
+
+/* Check for unexpected non-AE_OK errors */
+
+#define ACPI_CHECK_OK(name, status)   ACPI_CHECK_STATUS (name, status, AE_OK);

 #define FILE_SUFFIX_DISASSEMBLY     "dsl"
 #define FILE_SUFFIX_BINARY_TABLE    ".dat"	/* Needs the dot */

+/* acfileio */
+
+acpi_status
+ac_get_all_tables_from_file(char *filename,
+			    u8 get_only_aml_tables,
+			    struct acpi_new_table_desc **return_list_head);
+
+u8 ac_is_file_binary(FILE * file);
+
+acpi_status ac_validate_table_header(FILE * file, long table_offset);
+
+/* Values for get_only_aml_tables */
+
+#define ACPI_GET_ONLY_AML_TABLES    TRUE
+#define ACPI_GET_ALL_TABLES         FALSE
+
 /*
  * getopt
  */
@@ -107,30 +138,6 @@ extern char *acpi_gbl_optarg;
  */
 u32 cm_get_file_size(ACPI_FILE file);

-#ifndef ACPI_DUMP_APP
-/*
- * adisasm
- */
-acpi_status
-ad_aml_disassemble(u8 out_to_file,
-		   char *filename, char *prefix, char **out_filename);
-
-void ad_print_statistics(void);
-
-acpi_status ad_find_dsdt(u8 **dsdt_ptr, u32 *dsdt_length);
-
-void ad_dump_tables(void);
-
-acpi_status ad_get_local_tables(void);
-
-acpi_status
-ad_parse_table(struct acpi_table_header *table,
-	       acpi_owner_id * owner_id, u8 load_table, u8 external);
-
-acpi_status ad_display_tables(char *filename, struct acpi_table_header *table);
-
-acpi_status ad_display_statistics(void);
-
 /*
  * adwalk
  */
@@ -168,6 +175,5 @@ char *ad_generate_filename(char *prefix, char *table_id);
 void
 ad_write_table(struct acpi_table_header *table,
 	       u32 length, char *table_name, char *oem_table_id);
-#endif

 #endif				/* _ACAPPS */

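The ACPI_CHECK_STATUS()/ACPI_CHECK_OK() helpers added above stringify the checked call with #name and record the source line, so an unexpected status is reported together with its origin. A stand-alone illustration of the same pattern in plain C (hypothetical names, not the ACPICA macros themselves):

#include <stdio.h>

#define CHECK_STATUS(name, status, expected) \
	do { \
		if ((status) != (expected)) \
			printf("Unexpected status %d from %s (line %d)\n", \
			       (status), #name, __LINE__); \
	} while (0)

#define CHECK_OK(name, status)	CHECK_STATUS(name, status, 0)

int main(void)
{
	int status = -1;	/* pretend some_acpi_call() failed */

	CHECK_OK(some_acpi_call, status);
	return 0;
}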
+ 26 - 15
drivers/acpi/acpica/acdebug.h

@@ -80,9 +80,15 @@ struct acpi_db_execute_walk {
 /*
 /*
  * dbxface - external debugger interfaces
  * dbxface - external debugger interfaces
  */
  */
-acpi_status
-acpi_db_single_step(struct acpi_walk_state *walk_state,
-		    union acpi_parse_object *op, u32 op_type);
+ACPI_DBR_DEPENDENT_RETURN_OK(acpi_status
+			     acpi_db_single_step(struct acpi_walk_state
+						 *walk_state,
+						 union acpi_parse_object *op,
+						 u32 op_type))
+ ACPI_DBR_DEPENDENT_RETURN_VOID(void
+				acpi_db_signal_break_point(struct
+							   acpi_walk_state
+							   *walk_state))
 
 
 /*
 /*
  * dbcmds - debug commands and output routines
  * dbcmds - debug commands and output routines
@@ -182,11 +188,15 @@ void acpi_db_display_method_info(union acpi_parse_object *op);
 
 
 void acpi_db_decode_and_display_object(char *target, char *output_type);
 void acpi_db_decode_and_display_object(char *target, char *output_type);
 
 
-void
-acpi_db_display_result_object(union acpi_operand_object *obj_desc,
-			      struct acpi_walk_state *walk_state);
+ACPI_DBR_DEPENDENT_RETURN_VOID(void
+			       acpi_db_display_result_object(union
+							     acpi_operand_object
+							     *obj_desc,
+							     struct
+							     acpi_walk_state
+							     *walk_state))
 
 
-acpi_status acpi_db_display_all_methods(char *display_count_arg);
+ acpi_status acpi_db_display_all_methods(char *display_count_arg);
 
 
 void acpi_db_display_arguments(void);
 void acpi_db_display_arguments(void);
 
 
@@ -198,9 +208,13 @@ void acpi_db_display_calling_tree(void);
 
 
 void acpi_db_display_object_type(char *object_arg);
 void acpi_db_display_object_type(char *object_arg);
 
 
-void
-acpi_db_display_argument_object(union acpi_operand_object *obj_desc,
-				struct acpi_walk_state *walk_state);
+ACPI_DBR_DEPENDENT_RETURN_VOID(void
+			       acpi_db_display_argument_object(union
+							       acpi_operand_object
+							       *obj_desc,
+							       struct
+							       acpi_walk_state
+							       *walk_state))
 
 
 /*
 /*
  * dbexec - debugger control method execution
  * dbexec - debugger control method execution
@@ -231,10 +245,7 @@ void acpi_db_open_debug_file(char *name);
 
 
 acpi_status acpi_db_load_acpi_table(char *filename);
 acpi_status acpi_db_load_acpi_table(char *filename);
 
 
-acpi_status
-acpi_db_get_table_from_file(char *filename,
-			    struct acpi_table_header **table,
-			    u8 must_be_aml_table);
+acpi_status acpi_db_load_tables(struct acpi_new_table_desc *list_head);
 
 
 /*
 /*
  * dbhistry - debugger HISTORY command
  * dbhistry - debugger HISTORY command
@@ -257,7 +268,7 @@ acpi_db_command_dispatch(char *input_buffer,
 
 
 void ACPI_SYSTEM_XFACE acpi_db_execute_thread(void *context);
 void ACPI_SYSTEM_XFACE acpi_db_execute_thread(void *context);
 
 
-acpi_status acpi_db_user_commands(char prompt, union acpi_parse_object *op);
+acpi_status acpi_db_user_commands(void);
 
 
 char *acpi_db_get_next_token(char *string,
 char *acpi_db_get_next_token(char *string,
 			     char **next, acpi_object_type * return_type);
 			     char **next, acpi_object_type * return_type);

+ 9 - 2
drivers/acpi/acpica/acevents.h

@@ -161,6 +161,11 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 /*
  * evhandler - Address space handling
  */
+union acpi_operand_object *acpi_ev_find_region_handler(acpi_adr_space_type
+						       space_id,
+						       union acpi_operand_object
+						       *handler_obj);
+
 u8
 acpi_ev_has_default_handler(struct acpi_namespace_node *node,
 			    acpi_adr_space_type space_id);
@@ -193,9 +198,11 @@ void
 acpi_ev_detach_region(union acpi_operand_object *region_obj,
 		      u8 acpi_ns_is_locked);

-acpi_status
+void acpi_ev_associate_reg_method(union acpi_operand_object *region_obj);
+
+void
 acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
-			    acpi_adr_space_type space_id);
+			    acpi_adr_space_type space_id, u32 function);

 acpi_status
 acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function);

+ 2 - 6
drivers/acpi/acpica/acglobal.h

@@ -145,6 +145,7 @@ ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_operand_cache);

 ACPI_INIT_GLOBAL(u32, acpi_gbl_startup_flags, 0);
 ACPI_INIT_GLOBAL(u8, acpi_gbl_shutdown, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_early_initialization, TRUE);

 /* Global handlers */

@@ -164,7 +165,7 @@ ACPI_GLOBAL(u8, acpi_gbl_next_owner_id_offset);

 /* Initialization sequencing */

-ACPI_GLOBAL(u8, acpi_gbl_reg_methods_executed);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_reg_methods_enabled, FALSE);

 /* Misc */

@@ -326,7 +327,6 @@ ACPI_GLOBAL(struct acpi_external_file *, acpi_gbl_external_file_list);
 #ifdef ACPI_DEBUGGER

 ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE);
-ACPI_INIT_GLOBAL(u8, acpi_gbl_method_executing, FALSE);
 ACPI_INIT_GLOBAL(acpi_thread_id, acpi_gbl_db_thread_id, ACPI_INVALID_THREAD_ID);

 ACPI_GLOBAL(u8, acpi_gbl_db_opt_no_ini_methods);
@@ -345,7 +345,6 @@ ACPI_GLOBAL(acpi_object_type, acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]);

 /* These buffers should all be the same size */

-ACPI_GLOBAL(char, acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE]);
 ACPI_GLOBAL(char, acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]);
 ACPI_GLOBAL(char, acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE]);
 ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]);
@@ -360,9 +359,6 @@ ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc);
 ACPI_GLOBAL(u32, acpi_gbl_num_nodes);
 ACPI_GLOBAL(u32, acpi_gbl_num_objects);

-ACPI_GLOBAL(acpi_mutex, acpi_gbl_db_command_ready);
-ACPI_GLOBAL(acpi_mutex, acpi_gbl_db_command_complete);
-
 #endif				/* ACPI_DEBUGGER */

 /*****************************************************************************

+ 11 - 1
drivers/acpi/acpica/aclocal.h

@@ -219,6 +219,13 @@ struct acpi_table_list {
 #define ACPI_ROOT_ORIGIN_ALLOCATED      (1)
 #define ACPI_ROOT_ALLOW_RESIZE          (2)

+/* List to manage incoming ACPI tables */
+
+struct acpi_new_table_desc {
+	struct acpi_table_header *table;
+	struct acpi_new_table_desc *next;
+};
+
 /* Predefined table indexes */

 #define ACPI_INVALID_TABLE_INDEX        (0xFFFFFFFF)
@@ -388,7 +395,8 @@ union acpi_predefined_info {

 /* Return object auto-repair info */

-typedef acpi_status(*acpi_object_converter) (union acpi_operand_object
+typedef acpi_status(*acpi_object_converter) (struct acpi_namespace_node * scope,
+					     union acpi_operand_object
 					     *original_object,
 					     union acpi_operand_object
 					     **converted_object);
@@ -420,6 +428,7 @@ struct acpi_simple_repair_info {

 struct acpi_reg_walk_info {
 	acpi_adr_space_type space_id;
+	u32 function;
 	u32 reg_run_count;
 };

@@ -861,6 +870,7 @@ struct acpi_parse_state {
 #define ACPI_PARSEOP_CLOSING_PAREN      0x10
 #define ACPI_PARSEOP_COMPOUND           0x20
 #define ACPI_PARSEOP_ASSIGNMENT         0x40
+#define ACPI_PARSEOP_ELSEIF             0x80

 /*****************************************************************************
  *

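struct acpi_new_table_desc above is a plain singly linked list: one node per incoming table, chained through next, which is what lets acpi_db_load_tables() accept any number of tables from a file. A stand-alone sketch of building and walking such a list (hypothetical names and a string payload instead of struct acpi_table_header, purely illustrative):

#include <stdio.h>
#include <stdlib.h>

struct table_desc {
	const char *table;		/* stand-in for struct acpi_table_header * */
	struct table_desc *next;
};

/* append a node at the tail so tables are installed in arrival order */
static void table_list_append(struct table_desc **head, const char *table)
{
	struct table_desc *node = calloc(1, sizeof(*node));
	struct table_desc **link = head;

	if (!node)
		return;
	node->table = table;
	while (*link)
		link = &(*link)->next;
	*link = node;
}

int main(void)
{
	struct table_desc *head = NULL, *cur;

	table_list_append(&head, "DSDT");
	table_list_append(&head, "SSDT1");
	for (cur = head; cur; cur = cur->next)
		printf("install %s\n", cur->table);
	while (head) {			/* free the list */
		cur = head;
		head = head->next;
		free(cur);
	}
	return 0;
}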
+ 0 - 11
drivers/acpi/acpica/acmacros.h

@@ -400,17 +400,6 @@
 #define ACPI_HW_OPTIONAL_FUNCTION(addr)     NULL
 #endif

-/*
- * Some code only gets executed when the debugger is built in.
- * Note that this is entirely independent of whether the
- * DEBUG_PRINT stuff (set by ACPI_DEBUG_OUTPUT) is on, or not.
- */
-#ifdef ACPI_DEBUGGER
-#define ACPI_DEBUGGER_EXEC(a)           a
-#else
-#define ACPI_DEBUGGER_EXEC(a)
-#endif
-
 /*
  * Macros used for ACPICA utilities only
  */

+ 10 - 2
drivers/acpi/acpica/acnamesp.h

@@ -77,6 +77,7 @@
 /* Object is not a package element */

 #define ACPI_NOT_PACKAGE_ELEMENT    ACPI_UINT32_MAX
+#define ACPI_ALL_PACKAGE_ELEMENTS   (ACPI_UINT32_MAX-1)

 /* Always emit warning message, not dependent on node flags */

@@ -183,13 +184,20 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
 			  union acpi_operand_object **return_object);

 acpi_status
-acpi_ns_convert_to_unicode(union acpi_operand_object *original_object,
+acpi_ns_convert_to_unicode(struct acpi_namespace_node *scope,
+			   union acpi_operand_object *original_object,
 			   union acpi_operand_object **return_object);

 acpi_status
-acpi_ns_convert_to_resource(union acpi_operand_object *original_object,
+acpi_ns_convert_to_resource(struct acpi_namespace_node *scope,
+			    union acpi_operand_object *original_object,
 			    union acpi_operand_object **return_object);

+acpi_status
+acpi_ns_convert_to_reference(struct acpi_namespace_node *scope,
+			     union acpi_operand_object *original_object,
+			     union acpi_operand_object **return_object);
+
 /*
  * nsdump - Namespace dump/print utilities
  */

+ 4 - 3
drivers/acpi/acpica/acobject.h

@@ -93,9 +93,10 @@
 #define AOPOBJ_AML_CONSTANT         0x01	/* Integer is an AML constant */
 #define AOPOBJ_STATIC_POINTER       0x02	/* Data is part of an ACPI table, don't delete */
 #define AOPOBJ_DATA_VALID           0x04	/* Object is initialized and data is valid */
-#define AOPOBJ_OBJECT_INITIALIZED   0x08	/* Region is initialized, _REG was run */
-#define AOPOBJ_SETUP_COMPLETE       0x10	/* Region setup is complete */
-#define AOPOBJ_INVALID              0x20	/* Host OS won't allow a Region address */
+#define AOPOBJ_OBJECT_INITIALIZED   0x08	/* Region is initialized */
+#define AOPOBJ_REG_CONNECTED        0x10	/* _REG was run */
+#define AOPOBJ_SETUP_COMPLETE       0x20	/* Region setup is complete */
+#define AOPOBJ_INVALID              0x40	/* Host OS won't allow a Region address */

 /******************************************************************************
  *

+ 5 - 5
drivers/acpi/acpica/acopcode.h

@@ -92,7 +92,7 @@
 #define ARGP_BYTELIST_OP                ARGP_LIST1 (ARGP_NAMESTRING)
 #define ARGP_BYTELIST_OP                ARGP_LIST1 (ARGP_NAMESTRING)
 #define ARGP_CONCAT_OP                  ARGP_LIST3 (ARGP_TERMARG,    ARGP_TERMARG,       ARGP_TARGET)
 #define ARGP_CONCAT_OP                  ARGP_LIST3 (ARGP_TERMARG,    ARGP_TERMARG,       ARGP_TARGET)
 #define ARGP_CONCAT_RES_OP              ARGP_LIST3 (ARGP_TERMARG,    ARGP_TERMARG,       ARGP_TARGET)
 #define ARGP_CONCAT_RES_OP              ARGP_LIST3 (ARGP_TERMARG,    ARGP_TERMARG,       ARGP_TARGET)
-#define ARGP_COND_REF_OF_OP             ARGP_LIST2 (ARGP_SUPERNAME,  ARGP_SUPERNAME)
+#define ARGP_COND_REF_OF_OP             ARGP_LIST2 (ARGP_NAME_OR_REF,ARGP_TARGET)
 #define ARGP_CONNECTFIELD_OP            ARGP_LIST1 (ARGP_NAMESTRING)
 #define ARGP_CONNECTFIELD_OP            ARGP_LIST1 (ARGP_NAMESTRING)
 #define ARGP_CONTINUE_OP                ARG_NONE
 #define ARGP_CONTINUE_OP                ARG_NONE
 #define ARGP_COPY_OP                    ARGP_LIST2 (ARGP_TERMARG,    ARGP_SIMPLENAME)
 #define ARGP_COPY_OP                    ARGP_LIST2 (ARGP_TERMARG,    ARGP_SIMPLENAME)
@@ -152,13 +152,14 @@
 #define ARGP_NAMEPATH_OP                ARGP_LIST1 (ARGP_NAMESTRING)
 #define ARGP_NAMEPATH_OP                ARGP_LIST1 (ARGP_NAMESTRING)
 #define ARGP_NOOP_OP                    ARG_NONE
 #define ARGP_NOOP_OP                    ARG_NONE
 #define ARGP_NOTIFY_OP                  ARGP_LIST2 (ARGP_SUPERNAME,  ARGP_TERMARG)
 #define ARGP_NOTIFY_OP                  ARGP_LIST2 (ARGP_SUPERNAME,  ARGP_TERMARG)
+#define ARGP_OBJECT_TYPE_OP             ARGP_LIST1 (ARGP_NAME_OR_REF)
 #define ARGP_ONE_OP                     ARG_NONE
 #define ARGP_ONE_OP                     ARG_NONE
 #define ARGP_ONES_OP                    ARG_NONE
 #define ARGP_ONES_OP                    ARG_NONE
 #define ARGP_PACKAGE_OP                 ARGP_LIST3 (ARGP_PKGLENGTH,  ARGP_BYTEDATA,      ARGP_DATAOBJLIST)
 #define ARGP_PACKAGE_OP                 ARGP_LIST3 (ARGP_PKGLENGTH,  ARGP_BYTEDATA,      ARGP_DATAOBJLIST)
 #define ARGP_POWER_RES_OP               ARGP_LIST5 (ARGP_PKGLENGTH,  ARGP_NAME,          ARGP_BYTEDATA,  ARGP_WORDDATA,  ARGP_OBJLIST)
 #define ARGP_POWER_RES_OP               ARGP_LIST5 (ARGP_PKGLENGTH,  ARGP_NAME,          ARGP_BYTEDATA,  ARGP_WORDDATA,  ARGP_OBJLIST)
 #define ARGP_PROCESSOR_OP               ARGP_LIST6 (ARGP_PKGLENGTH,  ARGP_NAME,          ARGP_BYTEDATA,  ARGP_DWORDDATA, ARGP_BYTEDATA,  ARGP_OBJLIST)
 #define ARGP_PROCESSOR_OP               ARGP_LIST6 (ARGP_PKGLENGTH,  ARGP_NAME,          ARGP_BYTEDATA,  ARGP_DWORDDATA, ARGP_BYTEDATA,  ARGP_OBJLIST)
 #define ARGP_QWORD_OP                   ARGP_LIST1 (ARGP_QWORDDATA)
 #define ARGP_QWORD_OP                   ARGP_LIST1 (ARGP_QWORDDATA)
-#define ARGP_REF_OF_OP                  ARGP_LIST1 (ARGP_SUPERNAME)
+#define ARGP_REF_OF_OP                  ARGP_LIST1 (ARGP_NAME_OR_REF)
 #define ARGP_REGION_OP                  ARGP_LIST4 (ARGP_NAME,       ARGP_BYTEDATA,      ARGP_TERMARG,   ARGP_TERMARG)
 #define ARGP_REGION_OP                  ARGP_LIST4 (ARGP_NAME,       ARGP_BYTEDATA,      ARGP_TERMARG,   ARGP_TERMARG)
 #define ARGP_RELEASE_OP                 ARGP_LIST1 (ARGP_SUPERNAME)
 #define ARGP_RELEASE_OP                 ARGP_LIST1 (ARGP_SUPERNAME)
 #define ARGP_RESERVEDFIELD_OP           ARGP_LIST1 (ARGP_NAMESTRING)
 #define ARGP_RESERVEDFIELD_OP           ARGP_LIST1 (ARGP_NAMESTRING)
@@ -185,7 +186,6 @@
 #define ARGP_TO_HEX_STR_OP              ARGP_LIST2 (ARGP_TERMARG,    ARGP_TARGET)
 #define ARGP_TO_HEX_STR_OP              ARGP_LIST2 (ARGP_TERMARG,    ARGP_TARGET)
 #define ARGP_TO_INTEGER_OP              ARGP_LIST2 (ARGP_TERMARG,    ARGP_TARGET)
 #define ARGP_TO_INTEGER_OP              ARGP_LIST2 (ARGP_TERMARG,    ARGP_TARGET)
 #define ARGP_TO_STRING_OP               ARGP_LIST3 (ARGP_TERMARG,    ARGP_TERMARG,       ARGP_TARGET)
 #define ARGP_TO_STRING_OP               ARGP_LIST3 (ARGP_TERMARG,    ARGP_TERMARG,       ARGP_TARGET)
-#define ARGP_TYPE_OP                    ARGP_LIST1 (ARGP_SUPERNAME)
 #define ARGP_UNLOAD_OP                  ARGP_LIST1 (ARGP_SUPERNAME)
 #define ARGP_UNLOAD_OP                  ARGP_LIST1 (ARGP_SUPERNAME)
 #define ARGP_VAR_PACKAGE_OP             ARGP_LIST3 (ARGP_PKGLENGTH,  ARGP_TERMARG,       ARGP_DATAOBJLIST)
 #define ARGP_VAR_PACKAGE_OP             ARGP_LIST3 (ARGP_PKGLENGTH,  ARGP_TERMARG,       ARGP_DATAOBJLIST)
 #define ARGP_WAIT_OP                    ARGP_LIST2 (ARGP_SUPERNAME,  ARGP_TERMARG)
 #define ARGP_WAIT_OP                    ARGP_LIST2 (ARGP_SUPERNAME,  ARGP_TERMARG)
@@ -223,7 +223,7 @@
 #define ARGI_BUFFER_OP                  ARGI_LIST1 (ARGI_INTEGER)
 #define ARGI_BUFFER_OP                  ARGI_LIST1 (ARGI_INTEGER)
 #define ARGI_BYTE_OP                    ARGI_INVALID_OPCODE
 #define ARGI_BYTE_OP                    ARGI_INVALID_OPCODE
 #define ARGI_BYTELIST_OP                ARGI_INVALID_OPCODE
 #define ARGI_BYTELIST_OP                ARGI_INVALID_OPCODE
-#define ARGI_CONCAT_OP                  ARGI_LIST3 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA,   ARGI_TARGETREF)
+#define ARGI_CONCAT_OP                  ARGI_LIST3 (ARGI_ANYTYPE,    ARGI_ANYTYPE,       ARGI_TARGETREF)
 #define ARGI_CONCAT_RES_OP              ARGI_LIST3 (ARGI_BUFFER,     ARGI_BUFFER,        ARGI_TARGETREF)
 #define ARGI_CONCAT_RES_OP              ARGI_LIST3 (ARGI_BUFFER,     ARGI_BUFFER,        ARGI_TARGETREF)
 #define ARGI_COND_REF_OF_OP             ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF)
 #define ARGI_COND_REF_OF_OP             ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF)
 #define ARGI_CONNECTFIELD_OP            ARGI_INVALID_OPCODE
 #define ARGI_CONNECTFIELD_OP            ARGI_INVALID_OPCODE
@@ -285,6 +285,7 @@
 #define ARGI_NAMEPATH_OP                ARGI_INVALID_OPCODE
 #define ARGI_NAMEPATH_OP                ARGI_INVALID_OPCODE
 #define ARGI_NOOP_OP                    ARG_NONE
 #define ARGI_NOOP_OP                    ARG_NONE
 #define ARGI_NOTIFY_OP                  ARGI_LIST2 (ARGI_DEVICE_REF, ARGI_INTEGER)
 #define ARGI_NOTIFY_OP                  ARGI_LIST2 (ARGI_DEVICE_REF, ARGI_INTEGER)
+#define ARGI_OBJECT_TYPE_OP             ARGI_LIST1 (ARGI_ANYTYPE)
 #define ARGI_ONE_OP                     ARG_NONE
 #define ARGI_ONE_OP                     ARG_NONE
 #define ARGI_ONES_OP                    ARG_NONE
 #define ARGI_ONES_OP                    ARG_NONE
 #define ARGI_PACKAGE_OP                 ARGI_LIST1 (ARGI_INTEGER)
 #define ARGI_PACKAGE_OP                 ARGI_LIST1 (ARGI_INTEGER)
@@ -318,7 +319,6 @@
 #define ARGI_TO_HEX_STR_OP              ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET)
 #define ARGI_TO_HEX_STR_OP              ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET)
 #define ARGI_TO_INTEGER_OP              ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET)
 #define ARGI_TO_INTEGER_OP              ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET)
 #define ARGI_TO_STRING_OP               ARGI_LIST3 (ARGI_BUFFER,     ARGI_INTEGER,       ARGI_FIXED_TARGET)
 #define ARGI_TO_STRING_OP               ARGI_LIST3 (ARGI_BUFFER,     ARGI_INTEGER,       ARGI_FIXED_TARGET)
-#define ARGI_TYPE_OP                    ARGI_LIST1 (ARGI_ANYTYPE)
 #define ARGI_UNLOAD_OP                  ARGI_LIST1 (ARGI_DDBHANDLE)
 #define ARGI_UNLOAD_OP                  ARGI_LIST1 (ARGI_DDBHANDLE)
 #define ARGI_VAR_PACKAGE_OP             ARGI_LIST1 (ARGI_INTEGER)
 #define ARGI_VAR_PACKAGE_OP             ARGI_LIST1 (ARGI_INTEGER)
 #define ARGI_WAIT_OP                    ARGI_LIST2 (ARGI_EVENT,      ARGI_INTEGER)
 #define ARGI_WAIT_OP                    ARGI_LIST2 (ARGI_EVENT,      ARGI_INTEGER)

+ 7 - 1
drivers/acpi/acpica/acparser.h

@@ -92,7 +92,13 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
 acpi_status
 acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
 			  struct acpi_parse_state *parser_state,
-			  union acpi_parse_object *arg, u8 method_call);
+			  union acpi_parse_object *arg,
+			  u8 possible_method_call);
+
+/* Values for u8 above */
+
+#define ACPI_NOT_METHOD_CALL            FALSE
+#define ACPI_POSSIBLE_METHOD_CALL       TRUE

 acpi_status
 acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,

+ 7 - 19
drivers/acpi/acpica/acutils.h

@@ -184,24 +184,24 @@ acpi_status acpi_ut_init_globals(void);
 
 
 #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
 #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
 
 
-char *acpi_ut_get_mutex_name(u32 mutex_id);
+const char *acpi_ut_get_mutex_name(u32 mutex_id);
 
 
 const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type);
 const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type);
 #endif
 #endif
 
 
-char *acpi_ut_get_type_name(acpi_object_type type);
+const char *acpi_ut_get_type_name(acpi_object_type type);
 
 
-char *acpi_ut_get_node_name(void *object);
+const char *acpi_ut_get_node_name(void *object);
 
 
-char *acpi_ut_get_descriptor_name(void *object);
+const char *acpi_ut_get_descriptor_name(void *object);
 
 
 const char *acpi_ut_get_reference_name(union acpi_operand_object *object);
 const char *acpi_ut_get_reference_name(union acpi_operand_object *object);
 
 
-char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc);
+const char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc);
 
 
-char *acpi_ut_get_region_name(u8 space_id);
+const char *acpi_ut_get_region_name(u8 space_id);
 
 
-char *acpi_ut_get_event_name(u32 event_id);
+const char *acpi_ut_get_event_name(u32 event_id);
 
 
 char acpi_ut_hex_to_ascii_char(u64 integer, u32 position);
 char acpi_ut_hex_to_ascii_char(u64 integer, u32 position);
 
 
@@ -352,14 +352,6 @@ acpi_ut_execute_power_methods(struct acpi_namespace_node *device_node,
 			      const char **method_names,
 			      const char **method_names,
 			      u8 method_count, u8 *out_values);
 			      u8 method_count, u8 *out_values);
 
 
-/*
- * utfileio - file operations
- */
-#ifdef ACPI_APPLICATION
-acpi_status
-acpi_ut_read_table_from_file(char *filename, struct acpi_table_header **table);
-#endif
-
 /*
 /*
  * utids - device ID support
  * utids - device ID support
  */
  */
@@ -371,10 +363,6 @@ acpi_status
 acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
 acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
 		    struct acpi_pnp_device_id ** return_id);
 		    struct acpi_pnp_device_id ** return_id);
 
 
-acpi_status
-acpi_ut_execute_SUB(struct acpi_namespace_node *device_node,
-		    struct acpi_pnp_device_id **return_id);
-
 acpi_status
 acpi_status
 acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
 acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
 		    struct acpi_pnp_device_id_list ** return_cid_list);
 		    struct acpi_pnp_device_id_list ** return_cid_list);

+ 3 - 2
drivers/acpi/acpica/amlcode.h

@@ -120,7 +120,7 @@
 #define AML_CREATE_WORD_FIELD_OP    (u16) 0x8b
 #define AML_CREATE_BYTE_FIELD_OP    (u16) 0x8c
 #define AML_CREATE_BIT_FIELD_OP     (u16) 0x8d
-#define AML_TYPE_OP                 (u16) 0x8e
+#define AML_OBJECT_TYPE_OP          (u16) 0x8e
 #define AML_CREATE_QWORD_FIELD_OP   (u16) 0x8f	/* ACPI 2.0 */
 #define AML_LAND_OP                 (u16) 0x90
 #define AML_LOR_OP                  (u16) 0x91
@@ -238,7 +238,8 @@
 #define ARGP_TERMLIST               0x0F
 #define ARGP_WORDDATA               0x10
 #define ARGP_QWORDDATA              0x11
-#define ARGP_SIMPLENAME             0x12
+#define ARGP_SIMPLENAME             0x12	/* name_string | local_term | arg_term */
+#define ARGP_NAME_OR_REF            0x13	/* For object_type only */

 /*
  * Resolved argument types for the AML Interpreter

+ 3 - 8
drivers/acpi/acpica/dbcmds.c

@@ -798,7 +798,7 @@ acpi_db_device_resources(acpi_handle obj_handle,
 	acpi_status status;

 	node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
-	parent_path = acpi_ns_get_external_pathname(node);
+	parent_path = acpi_ns_get_normalized_pathname(node, TRUE);
 	if (!parent_path) {
 		return (AE_NO_MEMORY);
 	}
@@ -1131,13 +1131,8 @@ void acpi_db_trace(char *enable_arg, char *method_arg, char *once_arg)
 	u32 debug_layer = 0;
 	u32 flags = 0;

-	if (enable_arg) {
-		acpi_ut_strupr(enable_arg);
-	}
-
-	if (once_arg) {
-		acpi_ut_strupr(once_arg);
-	}
+	acpi_ut_strupr(enable_arg);
+	acpi_ut_strupr(once_arg);

 	if (method_arg) {
 		if (acpi_db_trace_method_name) {

+ 46 - 50
drivers/acpi/acpica/dbdisply.c

@@ -48,6 +48,7 @@
 #include "acnamesp.h"
 #include "acnamesp.h"
 #include "acparser.h"
 #include "acparser.h"
 #include "acinterp.h"
 #include "acinterp.h"
+#include "acevents.h"
 #include "acdebug.h"
 #include "acdebug.h"
 
 
 #define _COMPONENT          ACPI_CA_DEBUGGER
 #define _COMPONENT          ACPI_CA_DEBUGGER
@@ -588,7 +589,7 @@ void acpi_db_display_calling_tree(void)
  *
  *
  * FUNCTION:    acpi_db_display_object_type
  * FUNCTION:    acpi_db_display_object_type
  *
  *
- * PARAMETERS:  name            - User entered NS node handle or name
+ * PARAMETERS:  object_arg      - User entered NS node handle
  *
  *
  * RETURN:      None
  * RETURN:      None
  *
  *
@@ -596,44 +597,34 @@ void acpi_db_display_calling_tree(void)
  *
  *
  ******************************************************************************/
  ******************************************************************************/
 
 
-void acpi_db_display_object_type(char *name)
+void acpi_db_display_object_type(char *object_arg)
 {
 {
-	struct acpi_namespace_node *node;
+	acpi_handle handle;
 	struct acpi_device_info *info;
 	struct acpi_device_info *info;
 	acpi_status status;
 	acpi_status status;
 	u32 i;
 	u32 i;
 
 
-	node = acpi_db_convert_to_node(name);
-	if (!node) {
-		return;
-	}
+	handle = ACPI_TO_POINTER(strtoul(object_arg, NULL, 16));
 
 
-	status = acpi_get_object_info(ACPI_CAST_PTR(acpi_handle, node), &info);
+	status = acpi_get_object_info(handle, &info);
 	if (ACPI_FAILURE(status)) {
 	if (ACPI_FAILURE(status)) {
 		acpi_os_printf("Could not get object info, %s\n",
 		acpi_os_printf("Could not get object info, %s\n",
 			       acpi_format_exception(status));
 			       acpi_format_exception(status));
 		return;
 		return;
 	}
 	}
 
 
-	if (info->valid & ACPI_VALID_ADR) {
-		acpi_os_printf("ADR: %8.8X%8.8X, STA: %8.8X, Flags: %X\n",
-			       ACPI_FORMAT_UINT64(info->address),
-			       info->current_status, info->flags);
-	}
-	if (info->valid & ACPI_VALID_SXDS) {
-		acpi_os_printf("S1D-%2.2X S2D-%2.2X S3D-%2.2X S4D-%2.2X\n",
-			       info->highest_dstates[0],
-			       info->highest_dstates[1],
-			       info->highest_dstates[2],
-			       info->highest_dstates[3]);
-	}
-	if (info->valid & ACPI_VALID_SXWS) {
-		acpi_os_printf
-		    ("S0W-%2.2X S1W-%2.2X S2W-%2.2X S3W-%2.2X S4W-%2.2X\n",
-		     info->lowest_dstates[0], info->lowest_dstates[1],
-		     info->lowest_dstates[2], info->lowest_dstates[3],
-		     info->lowest_dstates[4]);
-	}
+	acpi_os_printf("ADR: %8.8X%8.8X, STA: %8.8X, Flags: %X\n",
+		       ACPI_FORMAT_UINT64(info->address),
+		       info->current_status, info->flags);
+
+	acpi_os_printf("S1D-%2.2X S2D-%2.2X S3D-%2.2X S4D-%2.2X\n",
+		       info->highest_dstates[0], info->highest_dstates[1],
+		       info->highest_dstates[2], info->highest_dstates[3]);
+
+	acpi_os_printf("S0W-%2.2X S1W-%2.2X S2W-%2.2X S3W-%2.2X S4W-%2.2X\n",
+		       info->lowest_dstates[0], info->lowest_dstates[1],
+		       info->lowest_dstates[2], info->lowest_dstates[3],
+		       info->lowest_dstates[4]);
 
 
 	if (info->valid & ACPI_VALID_HID) {
 	if (info->valid & ACPI_VALID_HID) {
 		acpi_os_printf("HID: %s\n", info->hardware_id.string);
 		acpi_os_printf("HID: %s\n", info->hardware_id.string);
@@ -643,10 +634,6 @@ void acpi_db_display_object_type(char *name)
 		acpi_os_printf("UID: %s\n", info->unique_id.string);
 		acpi_os_printf("UID: %s\n", info->unique_id.string);
 	}
 	}
 
 
-	if (info->valid & ACPI_VALID_SUB) {
-		acpi_os_printf("SUB: %s\n", info->subsystem_id.string);
-	}
-
 	if (info->valid & ACPI_VALID_CID) {
 	if (info->valid & ACPI_VALID_CID) {
 		for (i = 0; i < info->compatible_id_list.count; i++) {
 		for (i = 0; i < info->compatible_id_list.count; i++) {
 			acpi_os_printf("CID %u: %s\n", i,
 			acpi_os_printf("CID %u: %s\n", i,
@@ -679,6 +666,12 @@ acpi_db_display_result_object(union acpi_operand_object *obj_desc,
 			      struct acpi_walk_state *walk_state)
 			      struct acpi_walk_state *walk_state)
 {
 {
 
 
+#ifndef ACPI_APPLICATION
+	if (acpi_gbl_db_thread_id != acpi_os_get_thread_id()) {
+		return;
+	}
+#endif
+
 	/* Only display if single stepping */
 	/* Only display if single stepping */
 
 
 	if (!acpi_gbl_cm_single_step) {
 	if (!acpi_gbl_cm_single_step) {
@@ -708,6 +701,12 @@ acpi_db_display_argument_object(union acpi_operand_object *obj_desc,
 				struct acpi_walk_state *walk_state)
 				struct acpi_walk_state *walk_state)
 {
 {
 
 
+#ifndef ACPI_APPLICATION
+	if (acpi_gbl_db_thread_id != acpi_os_get_thread_id()) {
+		return;
+	}
+#endif
+
 	if (!acpi_gbl_cm_single_step) {
 	if (!acpi_gbl_cm_single_step) {
 		return;
 		return;
 	}
 	}
@@ -951,28 +950,25 @@ void acpi_db_display_handlers(void)
 	if (obj_desc) {
 	if (obj_desc) {
 		for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_gbl_space_id_list); i++) {
 		for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_gbl_space_id_list); i++) {
 			space_id = acpi_gbl_space_id_list[i];
 			space_id = acpi_gbl_space_id_list[i];
-			handler_obj = obj_desc->device.handler;
 
 
 			acpi_os_printf(ACPI_PREDEFINED_PREFIX,
 			acpi_os_printf(ACPI_PREDEFINED_PREFIX,
 				       acpi_ut_get_region_name((u8)space_id),
 				       acpi_ut_get_region_name((u8)space_id),
 				       space_id);
 				       space_id);
 
 
-			while (handler_obj) {
-				if (acpi_gbl_space_id_list[i] ==
-				    handler_obj->address_space.space_id) {
-					acpi_os_printf
-					    (ACPI_HANDLER_PRESENT_STRING,
-					     (handler_obj->address_space.
-					      handler_flags &
-					      ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)
-					     ? "Default" : "User",
-					     handler_obj->address_space.
-					     handler);
-
-					goto found_handler;
-				}
+			handler_obj =
+			    acpi_ev_find_region_handler(space_id,
+							obj_desc->common_notify.
+							handler);
+			if (handler_obj) {
+				acpi_os_printf(ACPI_HANDLER_PRESENT_STRING,
+					       (handler_obj->address_space.
+						handler_flags &
+						ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)
+					       ? "Default" : "User",
+					       handler_obj->address_space.
+					       handler);
 
 
-				handler_obj = handler_obj->address_space.next;
+				goto found_handler;
 			}
 			}
 
 
 			/* There is no handler for this space_id */
 			/* There is no handler for this space_id */
@@ -984,7 +980,7 @@ found_handler:		;
 
 
 		/* Find all handlers for user-defined space_IDs */
 		/* Find all handlers for user-defined space_IDs */
 
 
-		handler_obj = obj_desc->device.handler;
+		handler_obj = obj_desc->common_notify.handler;
 		while (handler_obj) {
 		while (handler_obj) {
 			if (handler_obj->address_space.space_id >=
 			if (handler_obj->address_space.space_id >=
 			    ACPI_USER_REGION_BEGIN) {
 			    ACPI_USER_REGION_BEGIN) {
@@ -1079,14 +1075,14 @@ acpi_db_display_non_root_handlers(acpi_handle obj_handle,
 		return (AE_OK);
 	}

-	pathname = acpi_ns_get_external_pathname(node);
+	pathname = acpi_ns_get_normalized_pathname(node, TRUE);
 	if (!pathname) {
 		return (AE_OK);
 	}

 	/* Display all handlers associated with this device */

-	handler_obj = obj_desc->device.handler;
+	handler_obj = obj_desc->common_notify.handler;
 	while (handler_obj) {
 		acpi_os_printf(ACPI_PREDEFINED_PREFIX,
 			       acpi_ut_get_region_name((u8)handler_obj->

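The dbdisply.c hunks above stop open-coding the walk of a device's address-space handler list and call the new acpi_ev_find_region_handler() helper instead (its definition appears in the evhandler.c portion of this diff), reading the list head from common_notify.handler rather than device.handler. A minimal standalone sketch of that kind of keyed list search follows; the struct and function names are simplified stand-ins for illustration, not the real ACPICA types.

#include <stdio.h>

/* Simplified stand-ins for the ACPICA handler objects (illustration only) */
struct addr_handler {
	unsigned char space_id;          /* address space this handler serves */
	void (*handler)(void);           /* the region handler itself */
	struct addr_handler *next;       /* singly linked list */
};

/* Walk the list and return the first handler matching space_id, or NULL */
static struct addr_handler *find_region_handler(unsigned char space_id,
						struct addr_handler *head)
{
	while (head) {
		if (head->space_id == space_id) {
			return (head);
		}
		head = head->next;
	}
	return (NULL);
}

static void dummy(void) { }

int main(void)
{
	struct addr_handler pci = { 2, dummy, NULL };
	struct addr_handler mem = { 0, dummy, &pci };

	printf("SystemMemory handler %s\n",
	       find_region_handler(0, &mem) ? "present" : "absent");
	printf("EC handler %s\n",
	       find_region_handler(3, &mem) ? "present" : "absent");
	return 0;
}

Centralizing the search means both the display path above and the install path in evhandler.c share one loop instead of two hand-rolled copies.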
+ 15 - 108
drivers/acpi/acpica/dbfileio.c

@@ -46,6 +46,10 @@
 #include "accommon.h"
 #include "acdebug.h"
 #include "actables.h"
+#include <stdio.h>
+#ifdef ACPI_APPLICATION
+#include "acapps.h"
+#endif

 #define _COMPONENT          ACPI_CA_DEBUGGER
 ACPI_MODULE_NAME("dbfileio")
@@ -110,122 +114,31 @@ void acpi_db_open_debug_file(char *name)
 }
 #endif

-#ifdef ACPI_APPLICATION
-#include "acapps.h"
-
-/*******************************************************************************
- *
- * FUNCTION:    ae_local_load_table
- *
- * PARAMETERS:  table           - pointer to a buffer containing the entire
- *                                table to be loaded
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to load a table from the caller's
- *              buffer. The buffer must contain an entire ACPI Table including
- *              a valid header. The header fields will be verified, and if it
- *              is determined that the table is invalid, the call will fail.
- *
- ******************************************************************************/
-
-static acpi_status ae_local_load_table(struct acpi_table_header *table)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(ae_local_load_table);
-
-#if 0
-/*    struct acpi_table_desc          table_info; */
-
-	if (!table) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	table_info.pointer = table;
-	status = acpi_tb_recognize_table(&table_info, ACPI_TABLE_ALL);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Install the new table into the local data structures */
-
-	status = acpi_tb_init_table_descriptor(&table_info);
-	if (ACPI_FAILURE(status)) {
-		if (status == AE_ALREADY_EXISTS) {
-
-			/* Table already exists, no error */
-
-			status = AE_OK;
-		}
-
-		/* Free table allocated by acpi_tb_get_table */
-
-		acpi_tb_delete_single_table(&table_info);
-		return_ACPI_STATUS(status);
-	}
-#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
-
-	status =
-	    acpi_ns_load_table(table_info.installed_desc, acpi_gbl_root_node);
-	if (ACPI_FAILURE(status)) {
-
-		/* Uninstall table and free the buffer */
-
-		acpi_tb_delete_tables_by_type(ACPI_TABLE_ID_DSDT);
-		return_ACPI_STATUS(status);
-	}
-#endif
-#endif
-
-	return_ACPI_STATUS(status);
-}
-#endif
-
 /*******************************************************************************
  *
- * FUNCTION:    acpi_db_get_table_from_file
+ * FUNCTION:    acpi_db_load_tables
  *
- * PARAMETERS:  filename        - File where table is located
- *              return_table    - Where a pointer to the table is returned
+ * PARAMETERS:  list_head       - List of ACPI tables to load
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Load an ACPI table from a file
+ * DESCRIPTION: Load ACPI tables from a previously constructed table list.
  *
  ******************************************************************************/

-acpi_status
-acpi_db_get_table_from_file(char *filename,
-			    struct acpi_table_header **return_table,
-			    u8 must_be_aml_file)
+acpi_status acpi_db_load_tables(struct acpi_new_table_desc *list_head)
 {
-#ifdef ACPI_APPLICATION
 	acpi_status status;
+	struct acpi_new_table_desc *table_list_head;
 	struct acpi_table_header *table;
-	u8 is_aml_table = TRUE;
-
-	status = acpi_ut_read_table_from_file(filename, &table);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	if (must_be_aml_file) {
-		is_aml_table = acpi_ut_is_aml_table(table);
-		if (!is_aml_table) {
-			ACPI_EXCEPTION((AE_INFO, AE_OK,
-					"Input for -e is not an AML table: "
-					"\"%4.4s\" (must be DSDT/SSDT)",
-					table->signature));
-			return (AE_TYPE);
-		}
-	}

-	if (is_aml_table) {
+	/* Load all ACPI tables in the list */

-		/* Attempt to recognize and install the table */
+	table_list_head = list_head;
+	while (table_list_head) {
+		table = table_list_head->table;

-		status = ae_local_load_table(table);
+		status = acpi_load_table(table);
 		if (ACPI_FAILURE(status)) {
 			if (status == AE_ALREADY_EXISTS) {
 				acpi_os_printf
@@ -239,18 +152,12 @@ acpi_db_get_table_from_file(char *filename,
 			return (status);
 		}

-		acpi_tb_print_table_header(0, table);
-
 		fprintf(stderr,
 			"Acpi table [%4.4s] successfully installed and loaded\n",
 			table->signature);
-	}

-	acpi_gbl_acpi_hardware_present = FALSE;
-	if (return_table) {
-		*return_table = table;
+		table_list_head = table_list_head->next;
 	}

-#endif				/* ACPI_APPLICATION */
 	return (AE_OK);
 }

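The rewritten dbfileio.c routine above walks a caller-built, singly linked list of table descriptors and hands each entry to acpi_load_table(), instead of reading a single table straight from a file. A small self-contained sketch of that traversal, with simplified stand-in types in place of struct acpi_new_table_desc, could look like this:

#include <stdio.h>

/* Illustrative stand-ins; the real list uses struct acpi_new_table_desc */
struct table_desc {
	const char *signature;
	struct table_desc *next;
};

/* Walk the list head-to-tail, "loading" each table in turn */
static int load_tables(struct table_desc *list_head)
{
	struct table_desc *entry = list_head;

	while (entry) {
		/* acpi_load_table(entry->table) in the real code */
		fprintf(stderr,
			"Acpi table [%4.4s] successfully installed and loaded\n",
			entry->signature);
		entry = entry->next;
	}
	return 0;
}

int main(void)
{
	struct table_desc ssdt = { "SSDT", NULL };
	struct table_desc dsdt = { "DSDT", &ssdt };

	return load_tables(&dsdt);
}

Building the list first (ac_get_all_tables_from_file in dbinput.c below) and loading it in a second pass is what lets the Load command accept files that carry several tables at once.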
+ 34 - 88
drivers/acpi/acpica/dbinput.c

@@ -45,6 +45,10 @@
 #include "accommon.h"
 #include "acdebug.h"

+#ifdef ACPI_APPLICATION
+#include "acapps.h"
+#endif
+
 #define _COMPONENT          ACPI_CA_DEBUGGER
 ACPI_MODULE_NAME("dbinput")

@@ -53,8 +57,6 @@ static u32 acpi_db_get_line(char *input_buffer);

 static u32 acpi_db_match_command(char *user_command);

-static void acpi_db_single_thread(void);
-
 static void acpi_db_display_command_info(char *command, u8 display_all);

 static void acpi_db_display_help(char *command);
@@ -623,9 +625,7 @@ static u32 acpi_db_get_line(char *input_buffer)

 	/* Uppercase the actual command */

-	if (acpi_gbl_db_args[0]) {
-		acpi_ut_strupr(acpi_gbl_db_args[0]);
-	}
+	acpi_ut_strupr(acpi_gbl_db_args[0]);

 	count = i;
 	if (count) {
@@ -1050,11 +1050,17 @@ acpi_db_command_dispatch(char *input_buffer,
 		acpi_db_close_debug_file();
 		break;

-	case CMD_LOAD:
+	case CMD_LOAD:{
+			struct acpi_new_table_desc *list_head = NULL;

-		status =
-		    acpi_db_get_table_from_file(acpi_gbl_db_args[1], NULL,
-						FALSE);
+			status =
+			    ac_get_all_tables_from_file(acpi_gbl_db_args[1],
+							ACPI_GET_ALL_TABLES,
+							&list_head);
+			if (ACPI_SUCCESS(status)) {
+				acpi_db_load_tables(list_head);
+			}
+		}
 		break;

 	case CMD_OPEN:
@@ -1149,55 +1155,16 @@ acpi_db_command_dispatch(char *input_buffer,
 
 
 void ACPI_SYSTEM_XFACE acpi_db_execute_thread(void *context)
 {
-	acpi_status status = AE_OK;
-	acpi_status Mstatus;
-
-	while (status != AE_CTRL_TERMINATE && !acpi_gbl_db_terminate_loop) {
-		acpi_gbl_method_executing = FALSE;
-		acpi_gbl_step_to_next_call = FALSE;
-
-		Mstatus = acpi_os_acquire_mutex(acpi_gbl_db_command_ready,
-						ACPI_WAIT_FOREVER);
-		if (ACPI_FAILURE(Mstatus)) {
-			return;
-		}
-
-		status =
-		    acpi_db_command_dispatch(acpi_gbl_db_line_buf, NULL, NULL);

-		acpi_os_release_mutex(acpi_gbl_db_command_complete);
-	}
+	(void)acpi_db_user_commands();
 	acpi_gbl_db_threads_terminated = TRUE;
 }

-/*******************************************************************************
- *
- * FUNCTION:    acpi_db_single_thread
- *
- * PARAMETERS:  None
- *
- * RETURN:      None
- *
- * DESCRIPTION: Debugger execute thread. Waits for a command line, then
- *              simply dispatches it.
- *
- ******************************************************************************/
-
-static void acpi_db_single_thread(void)
-{
-
-	acpi_gbl_method_executing = FALSE;
-	acpi_gbl_step_to_next_call = FALSE;
-
-	(void)acpi_db_command_dispatch(acpi_gbl_db_line_buf, NULL, NULL);
-}
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_db_user_commands
  *
- * PARAMETERS:  prompt              - User prompt (depends on mode)
- *              op                  - Current executing parse op
+ * PARAMETERS:  None
  *
  * RETURN:      None
  *
@@ -1206,7 +1173,7 @@ static void acpi_db_single_thread(void)
  *
  ******************************************************************************/

-acpi_status acpi_db_user_commands(char prompt, union acpi_parse_object *op)
+acpi_status acpi_db_user_commands(void)
 {
 	acpi_status status = AE_OK;

@@ -1216,52 +1183,31 @@ acpi_status acpi_db_user_commands(char prompt, union acpi_parse_object *op)

 	while (!acpi_gbl_db_terminate_loop) {

-		/* Force output to console until a command is entered */
-
-		acpi_db_set_output_destination(ACPI_DB_CONSOLE_OUTPUT);
-
-		/* Different prompt if method is executing */
-
-		if (!acpi_gbl_method_executing) {
-			acpi_os_printf("%1c ", ACPI_DEBUGGER_COMMAND_PROMPT);
-		} else {
-			acpi_os_printf("%1c ", ACPI_DEBUGGER_EXECUTE_PROMPT);
-		}
+		/* Wait the readiness of the command */

-		/* Get the user input line */
-
-		status = acpi_os_get_line(acpi_gbl_db_line_buf,
-					  ACPI_DB_LINE_BUFFER_SIZE, NULL);
+		status = acpi_os_wait_command_ready();
 		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status,
-					"While parsing command line"));
-			return (status);
+			break;
 		}

-		/* Check for single or multithreaded debug */
+		/* Just call to the command line interpreter */

-		if (acpi_gbl_debugger_configuration & DEBUGGER_MULTI_THREADED) {
-			/*
-			 * Signal the debug thread that we have a command to execute,
-			 * and wait for the command to complete.
-			 */
-			acpi_os_release_mutex(acpi_gbl_db_command_ready);
-			if (ACPI_FAILURE(status)) {
-				return (status);
-			}
+		acpi_gbl_method_executing = FALSE;
+		acpi_gbl_step_to_next_call = FALSE;

-			status =
-			    acpi_os_acquire_mutex(acpi_gbl_db_command_complete,
-						  ACPI_WAIT_FOREVER);
-			if (ACPI_FAILURE(status)) {
-				return (status);
-			}
-		} else {
-			/* Just call to the command line interpreter */
+		(void)acpi_db_command_dispatch(acpi_gbl_db_line_buf, NULL,
+					       NULL);
+
+		/* Notify the completion of the command */

-			acpi_db_single_thread();
+		status = acpi_os_notify_command_complete();
+		if (ACPI_FAILURE(status)) {
+			break;
 		}
 	}

+	if (ACPI_FAILURE(status) && status != AE_CTRL_TERMINATE) {
+		ACPI_EXCEPTION((AE_INFO, status, "While parsing command line"));
+	}
 	return (status);
 }

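The dbinput.c changes above collapse the old single-threaded and multi-threaded command paths into one loop: wait until a command line is ready, dispatch it, then signal completion, with the new acpi_os_wait_command_ready()/acpi_os_notify_command_complete() OSL calls supplying the handshake. The toy program below models that producer/consumer handshake with POSIX primitives; it only illustrates the pattern and is not the kernel's actual OSL implementation (buffer, flag, and thread names are made up).

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ready = PTHREAD_COND_INITIALIZER;
static pthread_cond_t complete = PTHREAD_COND_INITIALIZER;
static char line_buf[80];
static int have_command;

/* Debugger-side loop: wait for readiness, dispatch, notify completion */
static void *debugger_loop(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		while (!have_command)                  /* wait_command_ready()        */
			pthread_cond_wait(&ready, &lock);
		printf("dispatching: %s\n", line_buf); /* acpi_db_command_dispatch()  */
		int quit = (strcmp(line_buf, "QUIT") == 0);
		have_command = 0;
		pthread_cond_signal(&complete);        /* notify_command_complete()   */
		pthread_mutex_unlock(&lock);
		if (quit)
			return NULL;
	}
}

int main(void)
{
	pthread_t tid;
	const char *cmds[] = { "METHODS", "QUIT" };

	pthread_create(&tid, NULL, debugger_loop, NULL);
	for (unsigned i = 0; i < 2; i++) {
		pthread_mutex_lock(&lock);
		strcpy(line_buf, cmds[i]);
		have_command = 1;
		pthread_cond_signal(&ready);           /* command line is ready */
		while (have_command)
			pthread_cond_wait(&complete, &lock);
		pthread_mutex_unlock(&lock);
	}
	pthread_join(tid, NULL);
	return 0;
}

Because the wait/notify pair is now behind the OSL, the same debugger loop works whether the host drives it from a separate thread or from the same one.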
+ 1 - 1
drivers/acpi/acpica/dbnames.c

@@ -438,7 +438,7 @@ acpi_db_walk_for_predefined_names(acpi_handle obj_handle,
 		return (AE_OK);
 	}

-	pathname = acpi_ns_get_external_pathname(node);
+	pathname = acpi_ns_get_normalized_pathname(node, TRUE);
 	if (!pathname) {
 		return (AE_OK);
 	}

+ 1 - 0
drivers/acpi/acpica/dbstats.c

@@ -382,6 +382,7 @@ acpi_status acpi_db_display_statistics(char *type_arg)
 				       acpi_gbl_node_type_count[i],
 				       acpi_gbl_obj_type_count[i]);
 		}
+
 		acpi_os_printf("%16.16s % 10ld% 10ld\n", "Misc/Unknown",
 			       acpi_gbl_node_type_count_misc,
 			       acpi_gbl_obj_type_count_misc);

+ 1 - 1
drivers/acpi/acpica/dbtest.c

@@ -953,7 +953,7 @@ acpi_db_evaluate_one_predefined_name(acpi_handle obj_handle,
 		return (AE_OK);
 	}

-	pathname = acpi_ns_get_external_pathname(node);
+	pathname = acpi_ns_get_normalized_pathname(node, TRUE);
 	if (!pathname) {
 		return (AE_OK);
 	}

+ 1 - 0
drivers/acpi/acpica/dbutils.c

@@ -173,6 +173,7 @@ void acpi_db_dump_external_object(union acpi_object *obj_desc, u32 level)
 			if (obj_desc->buffer.length > 16) {
 				acpi_os_printf("\n");
 			}
+
 			acpi_ut_debug_dump_buffer(ACPI_CAST_PTR
 						  (u8,
 						   obj_desc->buffer.pointer),

+ 48 - 45
drivers/acpi/acpica/dbxface.c

@@ -85,46 +85,21 @@ acpi_db_start_command(struct acpi_walk_state *walk_state,
 
 
 	acpi_gbl_method_executing = TRUE;
 	status = AE_CTRL_TRUE;
-	while (status == AE_CTRL_TRUE) {
-		if (acpi_gbl_debugger_configuration == DEBUGGER_MULTI_THREADED) {
-
-			/* Handshake with the front-end that gets user command lines */
-
-			acpi_os_release_mutex(acpi_gbl_db_command_complete);
-
-			status =
-			    acpi_os_acquire_mutex(acpi_gbl_db_command_ready,
-						  ACPI_WAIT_FOREVER);
-			if (ACPI_FAILURE(status)) {
-				return (status);
-			}
-		} else {
-			/* Single threaded, we must get a command line ourselves */
-
-			/* Force output to console until a command is entered */
 
 
-			acpi_db_set_output_destination(ACPI_DB_CONSOLE_OUTPUT);
+	while (status == AE_CTRL_TRUE) {
 
 
-			/* Different prompt if method is executing */
+		/* Notify the completion of the command */
 
 
-			if (!acpi_gbl_method_executing) {
-				acpi_os_printf("%1c ",
-					       ACPI_DEBUGGER_COMMAND_PROMPT);
-			} else {
-				acpi_os_printf("%1c ",
-					       ACPI_DEBUGGER_EXECUTE_PROMPT);
-			}
+		status = acpi_os_notify_command_complete();
+		if (ACPI_FAILURE(status)) {
+			goto error_exit;
+		}
 
 
-			/* Get the user input line */
+		/* Wait the readiness of the command */
 
 
-			status = acpi_os_get_line(acpi_gbl_db_line_buf,
-						  ACPI_DB_LINE_BUFFER_SIZE,
-						  NULL);
-			if (ACPI_FAILURE(status)) {
-				ACPI_EXCEPTION((AE_INFO, status,
-						"While parsing command line"));
-				return (status);
-			}
+		status = acpi_os_wait_command_ready();
+		if (ACPI_FAILURE(status)) {
+			goto error_exit;
 		}

 		status =
@@ -134,9 +109,44 @@ acpi_db_start_command(struct acpi_walk_state *walk_state,

 	/* acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); */

+error_exit:
+	if (ACPI_FAILURE(status) && status != AE_CTRL_TERMINATE) {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"While parsing/handling command line"));
+	}
 	return (status);
 }

+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_db_signal_break_point
+ *
+ * PARAMETERS:  walk_state      - Current walk
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Called for AML_BREAK_POINT_OP
+ *
+ ******************************************************************************/
+
+void acpi_db_signal_break_point(struct acpi_walk_state *walk_state)
+{
+
+#ifndef ACPI_APPLICATION
+	if (acpi_gbl_db_thread_id != acpi_os_get_thread_id()) {
+		return;
+	}
+#endif
+
+	/*
+	 * Set the single-step flag. This will cause the debugger (if present)
+	 * to break to the console within the AML debugger at the start of the
+	 * next AML instruction.
+	 */
+	acpi_gbl_cm_single_step = TRUE;
+	acpi_os_printf("**break** Executed AML BreakPoint opcode\n");
+}
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_db_single_step
@@ -420,15 +430,7 @@ acpi_status acpi_initialize_debugger(void)
 
 
 		/* These were created with one unit, grab it */

-		status = acpi_os_acquire_mutex(acpi_gbl_db_command_complete,
-					       ACPI_WAIT_FOREVER);
-		if (ACPI_FAILURE(status)) {
-			acpi_os_printf("Could not get debugger mutex\n");
-			return_ACPI_STATUS(status);
-		}
-
-		status = acpi_os_acquire_mutex(acpi_gbl_db_command_ready,
-					       ACPI_WAIT_FOREVER);
+		status = acpi_os_initialize_command_signals();
 		if (ACPI_FAILURE(status)) {
 			acpi_os_printf("Could not get debugger mutex\n");
 			return_ACPI_STATUS(status);
@@ -473,13 +475,14 @@ void acpi_terminate_debugger(void)
 	acpi_gbl_db_terminate_loop = TRUE;

 	if (acpi_gbl_debugger_configuration & DEBUGGER_MULTI_THREADED) {
-		acpi_os_release_mutex(acpi_gbl_db_command_ready);

 		/* Wait the AML Debugger threads */

 		while (!acpi_gbl_db_threads_terminated) {
 			acpi_os_sleep(100);
 		}
+
+		acpi_os_terminate_command_signals();
 	}

 	if (acpi_gbl_db_buffer) {

+ 4 - 3
drivers/acpi/acpica/dsargs.c

@@ -194,8 +194,8 @@ acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
 	extra_desc = acpi_ns_get_secondary_object(obj_desc);
 	node = obj_desc->buffer_field.node;

-	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname(ACPI_TYPE_BUFFER_FIELD,
-						      node, NULL));
+	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+			(ACPI_TYPE_BUFFER_FIELD, node, NULL));

 	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n",
 			  acpi_ut_get_node_name(node)));
@@ -385,7 +385,8 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
 	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
 			(ACPI_TYPE_REGION, node, NULL));

-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n",
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+			  "[%4.4s] OpRegion Arg Init at AML %p\n",
 			  acpi_ut_get_node_name(node),
 			  extra_desc->extra.aml_start));


+ 2 - 8
drivers/acpi/acpica/dscontrol.c

@@ -47,6 +47,7 @@
 #include "amlcode.h"
 #include "acdispat.h"
 #include "acinterp.h"
+#include "acdebug.h"

 #define _COMPONENT          ACPI_DISPATCHER
 ACPI_MODULE_NAME("dscontrol")
@@ -348,14 +349,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,

 	case AML_BREAK_POINT_OP:

-		/*
-		 * Set the single-step flag. This will cause the debugger (if present)
-		 * to break to the console within the AML debugger at the start of the
-		 * next AML instruction.
-		 */
-		ACPI_DEBUGGER_EXEC(acpi_gbl_cm_single_step = TRUE);
-		ACPI_DEBUGGER_EXEC(acpi_os_printf
-				   ("**break** Executed AML BreakPoint opcode\n"));
+		acpi_db_signal_break_point(walk_state);

 		/* Call to the OSL in case OS wants a piece of the action */


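With the dscontrol.c hunk above, the AML BreakPoint opcode handler simply calls acpi_db_signal_break_point() (added in dbxface.c earlier in this diff) instead of poking the single-step flag under ACPI_DEBUGGER_EXEC macros. The sketch below shows the compile-out pattern that makes such an unconditional call cheap when the debugger is not built in; the stub shown for the non-debugger case is an assumption used for illustration, not copied from the real headers.

#include <stdio.h>

#define ACPI_DEBUGGER 1          /* flip to 0 to model a non-debugger build */

static int acpi_gbl_cm_single_step;

#if ACPI_DEBUGGER
static void acpi_db_signal_break_point(void *walk_state)
{
	(void)walk_state;
	/* Break to the console at the start of the next AML instruction */
	acpi_gbl_cm_single_step = 1;
	printf("**break** Executed AML BreakPoint opcode\n");
}
#else
static void acpi_db_signal_break_point(void *walk_state)
{
	(void)walk_state;        /* debugger compiled out: nothing to do */
}
#endif

int main(void)
{
	acpi_db_signal_break_point(NULL);        /* AML_BREAK_POINT_OP path */
	printf("single step = %d\n", acpi_gbl_cm_single_step);
	return 0;
}

Keeping the conditional inside the function (or a stub) lets the dispatcher stay free of debugger-specific macros.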
+ 3 - 2
drivers/acpi/acpica/dsdebug.c

@@ -161,6 +161,7 @@ acpi_ds_dump_method_stack(acpi_status status,
 	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
 			  "\n**** Exception %s during execution of method ",
 			  acpi_format_exception(status)));
+
 	acpi_ds_print_node_pathname(walk_state->method_node, NULL);

 	/* Display stack of executing methods */
@@ -203,8 +204,8 @@ acpi_ds_dump_method_stack(acpi_status status,
 		} else {
 			/*
 			 * This method has called another method
-			 * NOTE: the method call parse subtree is already deleted at this
-			 * point, so we cannot disassemble the method invocation.
+			 * NOTE: the method call parse subtree is already deleted at
+			 * this point, so we cannot disassemble the method invocation.
 			 */
 			ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH,
 					      "Call to method "));

+ 20 - 19
drivers/acpi/acpica/dsfield.c

@@ -106,6 +106,7 @@ acpi_ds_create_external_region(acpi_status lookup_status,
 	 * insert the name into the namespace.
 	 * insert the name into the namespace.
 	 */
 	 */
 	acpi_dm_add_op_to_external_list(op, path, ACPI_TYPE_REGION, 0, 0);
 	acpi_dm_add_op_to_external_list(op, path, ACPI_TYPE_REGION, 0, 0);
+
 	status = acpi_ns_lookup(walk_state->scope_info, path, ACPI_TYPE_REGION,
 	status = acpi_ns_lookup(walk_state->scope_info, path, ACPI_TYPE_REGION,
 				ACPI_IMODE_LOAD_PASS1, ACPI_NS_SEARCH_PARENT,
 				ACPI_IMODE_LOAD_PASS1, ACPI_NS_SEARCH_PARENT,
 				walk_state, node);
 				walk_state, node);
@@ -202,11 +203,10 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
 
 
 		/* Enter the name_string into the namespace */
 		/* Enter the name_string into the namespace */
 
 
-		status =
-		    acpi_ns_lookup(walk_state->scope_info,
-				   arg->common.value.string, ACPI_TYPE_ANY,
-				   ACPI_IMODE_LOAD_PASS1, flags, walk_state,
-				   &node);
+		status = acpi_ns_lookup(walk_state->scope_info,
+					arg->common.value.string, ACPI_TYPE_ANY,
+					ACPI_IMODE_LOAD_PASS1, flags,
+					walk_state, &node);
 		if (ACPI_FAILURE(status)) {
 		if (ACPI_FAILURE(status)) {
 			ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
 			ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
 			return_ACPI_STATUS(status);
 			return_ACPI_STATUS(status);
@@ -244,8 +244,8 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
 	}
 	}
 
 
 	/*
 	/*
-	 * Remember location in AML stream of the field unit opcode and operands --
-	 * since the buffer and index operands must be evaluated.
+	 * Remember location in AML stream of the field unit opcode and operands
+	 * -- since the buffer and index operands must be evaluated.
 	 */
 	 */
 	second_desc = obj_desc->common.next_object;
 	second_desc = obj_desc->common.next_object;
 	second_desc->extra.aml_start = op->named.data;
 	second_desc->extra.aml_start = op->named.data;
@@ -310,8 +310,8 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
 		switch (arg->common.aml_opcode) {
 		switch (arg->common.aml_opcode) {
 		case AML_INT_RESERVEDFIELD_OP:
 		case AML_INT_RESERVEDFIELD_OP:
 
 
-			position = (u64) info->field_bit_position
-			    + (u64) arg->common.value.size;
+			position = (u64)info->field_bit_position +
+			    (u64)arg->common.value.size;
 
 
 			if (position > ACPI_UINT32_MAX) {
 			if (position > ACPI_UINT32_MAX) {
 				ACPI_ERROR((AE_INFO,
 				ACPI_ERROR((AE_INFO,
@@ -344,13 +344,13 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
 
 
 			/* access_attribute (attrib_quick, attrib_byte, etc.) */
 			/* access_attribute (attrib_quick, attrib_byte, etc.) */
 
 
-			info->attribute =
-			    (u8)((arg->common.value.integer >> 8) & 0xFF);
+			info->attribute = (u8)
+			    ((arg->common.value.integer >> 8) & 0xFF);
 
 
 			/* access_length (for serial/buffer protocols) */
 			/* access_length (for serial/buffer protocols) */
 
 
-			info->access_length =
-			    (u8)((arg->common.value.integer >> 16) & 0xFF);
+			info->access_length = (u8)
+			    ((arg->common.value.integer >> 16) & 0xFF);
 			break;
 			break;
 
 
 		case AML_INT_CONNECTION_OP:
 		case AML_INT_CONNECTION_OP:
@@ -425,8 +425,8 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
 
 
 			/* Keep track of bit position for the next field */
 			/* Keep track of bit position for the next field */
 
 
-			position = (u64) info->field_bit_position
-			    + (u64) arg->common.value.size;
+			position = (u64)info->field_bit_position +
+			    (u64)arg->common.value.size;
 
 
 			if (position > ACPI_UINT32_MAX) {
 			if (position > ACPI_UINT32_MAX) {
 				ACPI_ERROR((AE_INFO,
 				ACPI_ERROR((AE_INFO,
@@ -716,11 +716,12 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
 
 
 	/*
 	/*
 	 * Use Info.data_register_node to store bank_field Op
 	 * Use Info.data_register_node to store bank_field Op
-	 * It's safe because data_register_node will never be used when create bank field
-	 * We store aml_start and aml_length in the bank_field Op for late evaluation
-	 * Used in acpi_ex_prep_field_value(Info)
+	 * It's safe because data_register_node will never be used when create
+	 * bank field \we store aml_start and aml_length in the bank_field Op for
+	 * late evaluation. Used in acpi_ex_prep_field_value(Info)
 	 *
 	 *
-	 * TBD: Or, should we add a field in struct acpi_create_field_info, like "void *ParentOp"?
+	 * TBD: Or, should we add a field in struct acpi_create_field_info, like
+	 * "void *ParentOp"?
 	 */
 	 */
 	info.data_register_node = (struct acpi_namespace_node *)op;
 	info.data_register_node = (struct acpi_namespace_node *)op;
 
 

+ 1 - 1
drivers/acpi/acpica/dsinit.c

@@ -247,7 +247,7 @@ acpi_ds_initialize_objects(u32 table_index,
 	/* Summary of objects initialized */

 	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
-			      "Table [%4.4s:%8.8s] (id %.2X) - %4u Objects with %3u Devices, "
+			      "Table [%4.4s: %-8.8s] (id %.2X) - %4u Objects with %3u Devices, "
 			      "%3u Regions, %4u Methods (%u/%u/%u Serial/Non/Cvt)\n",
 			      table->signature, table->oem_table_id, owner_id,
 			      info.object_count, info.device_count,

+ 27 - 12
drivers/acpi/acpica/dsmethod.c

@@ -118,10 +118,9 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
 		return_ACPI_STATUS(AE_NO_MEMORY);
 		return_ACPI_STATUS(AE_NO_MEMORY);
 	}
 	}
 
 
-	status =
-	    acpi_ds_init_aml_walk(walk_state, op, node,
-				  obj_desc->method.aml_start,
-				  obj_desc->method.aml_length, NULL, 0);
+	status = acpi_ds_init_aml_walk(walk_state, op, node,
+				       obj_desc->method.aml_start,
+				       obj_desc->method.aml_length, NULL, 0);
 	if (ACPI_FAILURE(status)) {
 	if (ACPI_FAILURE(status)) {
 		acpi_ds_delete_walk_state(walk_state);
 		acpi_ds_delete_walk_state(walk_state);
 		acpi_ps_free_op(op);
 		acpi_ps_free_op(op);
@@ -375,7 +374,8 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
 		    && (walk_state->thread->current_sync_level >
 		    && (walk_state->thread->current_sync_level >
 			obj_desc->method.mutex->mutex.sync_level)) {
 			obj_desc->method.mutex->mutex.sync_level)) {
 			ACPI_ERROR((AE_INFO,
 			ACPI_ERROR((AE_INFO,
-				    "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%u)",
+				    "Cannot acquire Mutex for method [%4.4s]"
+				    ", current SyncLevel is too large (%u)",
 				    acpi_ut_get_node_name(method_node),
 				    acpi_ut_get_node_name(method_node),
 				    walk_state->thread->current_sync_level));
 				    walk_state->thread->current_sync_level));
 
 
@@ -411,8 +411,19 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
 
 
 				obj_desc->method.mutex->mutex.thread_id =
 				obj_desc->method.mutex->mutex.thread_id =
 				    walk_state->thread->thread_id;
 				    walk_state->thread->thread_id;
-				walk_state->thread->current_sync_level =
-				    obj_desc->method.sync_level;
+
+				/*
+				 * Update the current sync_level only if this is not an auto-
+				 * serialized method. In the auto case, we have to ignore
+				 * the sync level for the method mutex (created for the
+				 * auto-serialization) because we have no idea of what the
+				 * sync level should be. Therefore, just ignore it.
+				 */
+				if (!(obj_desc->method.info_flags &
+				      ACPI_METHOD_IGNORE_SYNC_LEVEL)) {
+					walk_state->thread->current_sync_level =
+					    obj_desc->method.sync_level;
+				}
 			} else {
 			} else {
 				obj_desc->method.mutex->mutex.
 				obj_desc->method.mutex->mutex.
 				    original_sync_level =
 				    original_sync_level =
@@ -501,16 +512,18 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
 
 
 	/* Init for new method, possibly wait on method mutex */
 	/* Init for new method, possibly wait on method mutex */
 
 
-	status = acpi_ds_begin_method_execution(method_node, obj_desc,
-						this_walk_state);
+	status =
+	    acpi_ds_begin_method_execution(method_node, obj_desc,
+					   this_walk_state);
 	if (ACPI_FAILURE(status)) {
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
 		return_ACPI_STATUS(status);
 	}
 	}
 
 
 	/* Begin method parse/execution. Create a new walk state */
 	/* Begin method parse/execution. Create a new walk state */
 
 
-	next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
-						    NULL, obj_desc, thread);
+	next_walk_state =
+	    acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, obj_desc,
+				      thread);
 	if (!next_walk_state) {
 	if (!next_walk_state) {
 		status = AE_NO_MEMORY;
 		status = AE_NO_MEMORY;
 		goto cleanup;
 		goto cleanup;
@@ -797,7 +810,8 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
 		    info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
 		    info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
 			if (walk_state) {
 			if (walk_state) {
 				ACPI_INFO((AE_INFO,
 				ACPI_INFO((AE_INFO,
-					   "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
+					   "Marking method %4.4s as Serialized "
+					   "because of AE_ALREADY_EXISTS error",
 					   walk_state->method_node->name.
 					   walk_state->method_node->name.
 					   ascii));
 					   ascii));
 			}
 			}
@@ -815,6 +829,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
 			 */
 			 */
 			method_desc->method.info_flags &=
 			method_desc->method.info_flags &=
 			    ~ACPI_METHOD_SERIALIZED_PENDING;
 			    ~ACPI_METHOD_SERIALIZED_PENDING;
+
 			method_desc->method.info_flags |=
 			method_desc->method.info_flags |=
 			    (ACPI_METHOD_SERIALIZED |
 			    (ACPI_METHOD_SERIALIZED |
 			     ACPI_METHOD_IGNORE_SYNC_LEVEL);
 			     ACPI_METHOD_IGNORE_SYNC_LEVEL);

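One behavioural change buried in the dsmethod.c hunks above: when a method mutex exists only because of auto-serialization (ACPI_METHOD_IGNORE_SYNC_LEVEL set), acquiring it no longer overwrites the thread's current sync level, since that implicit mutex has no meaningful level of its own. A compact sketch of that rule, using simplified stand-in structs and an illustrative flag value, is:

#include <stdio.h>

#define ACPI_METHOD_IGNORE_SYNC_LEVEL 0x20   /* illustrative value only */

struct method { unsigned info_flags; unsigned sync_level; };
struct thread { unsigned current_sync_level; };

/* Only a real (ASL-declared) serialized method updates the thread level */
static void acquire_method_mutex(struct thread *t, const struct method *m)
{
	if (!(m->info_flags & ACPI_METHOD_IGNORE_SYNC_LEVEL)) {
		t->current_sync_level = m->sync_level;
	}
	/* else: auto-serialized method, leave the thread's level untouched */
}

int main(void)
{
	struct thread t = { .current_sync_level = 3 };
	struct method declared = { 0, 7 };
	struct method autoser = { ACPI_METHOD_IGNORE_SYNC_LEVEL, 0 };

	acquire_method_mutex(&t, &autoser);
	printf("after auto-serialized method: %u\n", t.current_sync_level);
	acquire_method_mutex(&t, &declared);
	printf("after declared-serialized method: %u\n", t.current_sync_level);
	return 0;
}

Ignoring the implicit mutex avoids clamping the thread to a sync level the ASL author never declared.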
+ 11 - 9
drivers/acpi/acpica/dsmthdat.c

@@ -99,6 +99,7 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
 	for (i = 0; i < ACPI_METHOD_NUM_ARGS; i++) {
 	for (i = 0; i < ACPI_METHOD_NUM_ARGS; i++) {
 		ACPI_MOVE_32_TO_32(&walk_state->arguments[i].name,
 		ACPI_MOVE_32_TO_32(&walk_state->arguments[i].name,
 				   NAMEOF_ARG_NTE);
 				   NAMEOF_ARG_NTE);
+
 		walk_state->arguments[i].name.integer |= (i << 24);
 		walk_state->arguments[i].name.integer |= (i << 24);
 		walk_state->arguments[i].descriptor_type = ACPI_DESC_TYPE_NAMED;
 		walk_state->arguments[i].descriptor_type = ACPI_DESC_TYPE_NAMED;
 		walk_state->arguments[i].type = ACPI_TYPE_ANY;
 		walk_state->arguments[i].type = ACPI_TYPE_ANY;
@@ -201,7 +202,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
 
 
 	if (!params) {
 	if (!params) {
 		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
 		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "No param list passed to method\n"));
+				  "No parameter list passed to method\n"));
 		return_ACPI_STATUS(AE_OK);
 		return_ACPI_STATUS(AE_OK);
 	}
 	}
 
 
@@ -214,9 +215,9 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
 		 * Store the argument in the method/walk descriptor.
 		 * Store the argument in the method/walk descriptor.
 		 * Do not copy the arg in order to implement call by reference
 		 * Do not copy the arg in order to implement call by reference
 		 */
 		 */
-		status = acpi_ds_method_data_set_value(ACPI_REFCLASS_ARG, index,
-						       params[index],
-						       walk_state);
+		status =
+		    acpi_ds_method_data_set_value(ACPI_REFCLASS_ARG, index,
+						  params[index], walk_state);
 		if (ACPI_FAILURE(status)) {
 		if (ACPI_FAILURE(status)) {
 			return_ACPI_STATUS(status);
 			return_ACPI_STATUS(status);
 		}
 		}
@@ -610,11 +611,11 @@ acpi_ds_store_object_to_local(u8 type,
 			 * do the indirect store
 			 * do the indirect store
 			 */
 			 */
 			if ((ACPI_GET_DESCRIPTOR_TYPE(current_obj_desc) ==
 			if ((ACPI_GET_DESCRIPTOR_TYPE(current_obj_desc) ==
-			     ACPI_DESC_TYPE_OPERAND)
-			    && (current_obj_desc->common.type ==
-				ACPI_TYPE_LOCAL_REFERENCE)
-			    && (current_obj_desc->reference.class ==
-				ACPI_REFCLASS_REFOF)) {
+			     ACPI_DESC_TYPE_OPERAND) &&
+			    (current_obj_desc->common.type ==
+			     ACPI_TYPE_LOCAL_REFERENCE) &&
+			    (current_obj_desc->reference.class ==
+			     ACPI_REFCLASS_REFOF)) {
 				ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
 				ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
 						  "Arg (%p) is an ObjRef(Node), storing in node %p\n",
 						  "Arg (%p) is an ObjRef(Node), storing in node %p\n",
 						  new_obj_desc,
 						  new_obj_desc,
@@ -638,6 +639,7 @@ acpi_ds_store_object_to_local(u8 type,
 				if (new_obj_desc != obj_desc) {
 				if (new_obj_desc != obj_desc) {
 					acpi_ut_remove_reference(new_obj_desc);
 					acpi_ut_remove_reference(new_obj_desc);
 				}
 				}
+
 				return_ACPI_STATUS(status);
 				return_ACPI_STATUS(status);
 			}
 			}
 		}
 		}

+ 11 - 8
drivers/acpi/acpica/dsobject.c

@@ -463,10 +463,10 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
 						  arg->common.node);
 						  arg->common.node);
 			}
 			}
 		} else {
 		} else {
-			status = acpi_ds_build_internal_object(walk_state, arg,
-							       &obj_desc->
-							       package.
-							       elements[i]);
+			status =
+			    acpi_ds_build_internal_object(walk_state, arg,
+							  &obj_desc->package.
+							  elements[i]);
 		}
 		}
 
 
 		if (*obj_desc_ptr) {
 		if (*obj_desc_ptr) {
@@ -525,7 +525,8 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
 		}
 		}
 
 
 		ACPI_INFO((AE_INFO,
 		ACPI_INFO((AE_INFO,
-			   "Actual Package length (%u) is larger than NumElements field (%u), truncated",
+			   "Actual Package length (%u) is larger than "
+			   "NumElements field (%u), truncated",
 			   i, element_count));
 			   i, element_count));
 	} else if (i < element_count) {
 	} else if (i < element_count) {
 		/*
 		/*
@@ -533,7 +534,8 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
 		 * Note: this is not an error, the package is padded out with NULLs.
 		 * Note: this is not an error, the package is padded out with NULLs.
 		 */
 		 */
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "Package List length (%u) smaller than NumElements count (%u), padded with null elements\n",
+				  "Package List length (%u) smaller than NumElements "
+				  "count (%u), padded with null elements\n",
 				  i, element_count));
 				  i, element_count));
 	}
 	}
 
 
@@ -584,8 +586,9 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
 
 
 	/* Build an internal object for the argument(s) */
 	/* Build an internal object for the argument(s) */
 
 
-	status = acpi_ds_build_internal_object(walk_state, op->common.value.arg,
-					       &obj_desc);
+	status =
+	    acpi_ds_build_internal_object(walk_state, op->common.value.arg,
+					  &obj_desc);
 	if (ACPI_FAILURE(status)) {
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
 		return_ACPI_STATUS(status);
 	}
 	}

+ 12 - 9
drivers/acpi/acpica/dsopcode.c

@@ -243,8 +243,9 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
 	 * For field_flags, use LOCK_RULE = 0 (NO_LOCK),
 	 * For field_flags, use LOCK_RULE = 0 (NO_LOCK),
 	 * UPDATE_RULE = 0 (UPDATE_PRESERVE)
 	 * UPDATE_RULE = 0 (UPDATE_PRESERVE)
 	 */
 	 */
-	status = acpi_ex_prep_common_field_object(obj_desc, field_flags, 0,
-						  bit_offset, bit_count);
+	status =
+	    acpi_ex_prep_common_field_object(obj_desc, field_flags, 0,
+					     bit_offset, bit_count);
 	if (ACPI_FAILURE(status)) {
 	if (ACPI_FAILURE(status)) {
 		goto cleanup;
 		goto cleanup;
 	}
 	}
@@ -330,8 +331,9 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
 
 
 	/* Resolve the operands */
 	/* Resolve the operands */
 
 
-	status = acpi_ex_resolve_operands(op->common.aml_opcode,
-					  ACPI_WALK_OPERANDS, walk_state);
+	status =
+	    acpi_ex_resolve_operands(op->common.aml_opcode, ACPI_WALK_OPERANDS,
+				     walk_state);
 	if (ACPI_FAILURE(status)) {
 	if (ACPI_FAILURE(status)) {
 		ACPI_ERROR((AE_INFO, "(%s) bad operand(s), status 0x%X",
 		ACPI_ERROR((AE_INFO, "(%s) bad operand(s), status 0x%X",
 			    acpi_ps_get_opcode_name(op->common.aml_opcode),
 			    acpi_ps_get_opcode_name(op->common.aml_opcode),
@@ -414,8 +416,9 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
 
 
 	/* Resolve the length and address operands to numbers */
 	/* Resolve the length and address operands to numbers */
 
 
-	status = acpi_ex_resolve_operands(op->common.aml_opcode,
-					  ACPI_WALK_OPERANDS, walk_state);
+	status =
+	    acpi_ex_resolve_operands(op->common.aml_opcode, ACPI_WALK_OPERANDS,
+				     walk_state);
 	if (ACPI_FAILURE(status)) {
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
 		return_ACPI_STATUS(status);
 	}
 	}
@@ -452,7 +455,6 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
 	/* Now the address and length are valid for this opregion */
 	/* Now the address and length are valid for this opregion */
 
 
 	obj_desc->region.flags |= AOPOBJ_DATA_VALID;
 	obj_desc->region.flags |= AOPOBJ_DATA_VALID;
-
 	return_ACPI_STATUS(status);
 	return_ACPI_STATUS(status);
 }
 }
 
 
@@ -510,8 +512,9 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
 	 * Resolve the Signature string, oem_id string,
 	 * Resolve the Signature string, oem_id string,
 	 * and oem_table_id string operands
 	 * and oem_table_id string operands
 	 */
 	 */
-	status = acpi_ex_resolve_operands(op->common.aml_opcode,
-					  ACPI_WALK_OPERANDS, walk_state);
+	status =
+	    acpi_ex_resolve_operands(op->common.aml_opcode, ACPI_WALK_OPERANDS,
+				     walk_state);
 	if (ACPI_FAILURE(status)) {
 	if (ACPI_FAILURE(status)) {
 		goto cleanup;
 		goto cleanup;
 	}
 	}

+ 22 - 23
drivers/acpi/acpica/dsutils.c

@@ -245,9 +245,9 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
 			 * we will use the return value
 			 * we will use the return value
 			 */
 			 */
 			if ((walk_state->control_state->common.state ==
 			if ((walk_state->control_state->common.state ==
-			     ACPI_CONTROL_PREDICATE_EXECUTING)
-			    && (walk_state->control_state->control.
-				predicate_op == op)) {
+			     ACPI_CONTROL_PREDICATE_EXECUTING) &&
+			    (walk_state->control_state->control.predicate_op ==
+			     op)) {
 				goto result_used;
 				goto result_used;
 			}
 			}
 			break;
 			break;
@@ -481,10 +481,9 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
 
 
 		/* Get the entire name string from the AML stream */
 		/* Get the entire name string from the AML stream */
 
 
-		status =
-		    acpi_ex_get_name_string(ACPI_TYPE_ANY,
-					    arg->common.value.buffer,
-					    &name_string, &name_length);
+		status = acpi_ex_get_name_string(ACPI_TYPE_ANY,
+						 arg->common.value.buffer,
+						 &name_string, &name_length);
 
 
 		if (ACPI_FAILURE(status)) {
 		if (ACPI_FAILURE(status)) {
 			return_ACPI_STATUS(status);
 			return_ACPI_STATUS(status);
@@ -503,9 +502,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
 		 */
 		 */
 		if ((walk_state->deferred_node) &&
 		if ((walk_state->deferred_node) &&
 		    (walk_state->deferred_node->type == ACPI_TYPE_BUFFER_FIELD)
 		    (walk_state->deferred_node->type == ACPI_TYPE_BUFFER_FIELD)
-		    && (arg_index ==
-			(u32) ((walk_state->opcode ==
-				AML_CREATE_FIELD_OP) ? 3 : 2))) {
+		    && (arg_index == (u32)
+			((walk_state->opcode == AML_CREATE_FIELD_OP) ? 3 : 2))) {
 			obj_desc =
 			obj_desc =
 			    ACPI_CAST_PTR(union acpi_operand_object,
 			    ACPI_CAST_PTR(union acpi_operand_object,
 					  walk_state->deferred_node);
 					  walk_state->deferred_node);
@@ -522,9 +520,10 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
 			op_info =
 			op_info =
 			    acpi_ps_get_opcode_info(parent_op->common.
 			    acpi_ps_get_opcode_info(parent_op->common.
 						    aml_opcode);
 						    aml_opcode);
-			if ((op_info->flags & AML_NSNODE)
-			    && (parent_op->common.aml_opcode !=
-				AML_INT_METHODCALL_OP)
+
+			if ((op_info->flags & AML_NSNODE) &&
+			    (parent_op->common.aml_opcode !=
+			     AML_INT_METHODCALL_OP)
 			    && (parent_op->common.aml_opcode != AML_REGION_OP)
 			    && (parent_op->common.aml_opcode != AML_REGION_OP)
 			    && (parent_op->common.aml_opcode !=
 			    && (parent_op->common.aml_opcode !=
 				AML_INT_NAMEPATH_OP)) {
 				AML_INT_NAMEPATH_OP)) {
@@ -605,8 +604,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
 		if (ACPI_FAILURE(status)) {
 		if (ACPI_FAILURE(status)) {
 			return_ACPI_STATUS(status);
 			return_ACPI_STATUS(status);
 		}
 		}
-		ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object
-				   (obj_desc, walk_state));
+
+		acpi_db_display_argument_object(obj_desc, walk_state);
 	} else {
 	} else {
 		/* Check for null name case */
 		/* Check for null name case */
 
 
@@ -633,15 +632,16 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
 			return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
 			return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
 		}
 		}
 
 
-		if ((op_info->flags & AML_HAS_RETVAL)
-		    || (arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
+		if ((op_info->flags & AML_HAS_RETVAL) ||
+		    (arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
 			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
 			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
 					  "Argument previously created, already stacked\n"));
 					  "Argument previously created, already stacked\n"));
 
 
-			ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object
-					   (walk_state->
-					    operands[walk_state->num_operands -
-						     1], walk_state));
+			acpi_db_display_argument_object(walk_state->
+							operands[walk_state->
+								 num_operands -
+								 1],
+							walk_state);
 
 
 			/*
 			/*
 			 * Use value that was already previously returned
 			 * Use value that was already previously returned
@@ -685,8 +685,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
 			return_ACPI_STATUS(status);
 			return_ACPI_STATUS(status);
 		}
 		}
 
 
-		ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object
-				   (obj_desc, walk_state));
+		acpi_db_display_argument_object(obj_desc, walk_state);
 	}
 	}
 
 
 	return_ACPI_STATUS(AE_OK);
 	return_ACPI_STATUS(AE_OK);

+ 17 - 18
drivers/acpi/acpica/dswexec.c

@@ -172,14 +172,14 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
 
 
 cleanup:
 cleanup:
 
 
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Completed a predicate eval=%X Op=%p\n",
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+			  "Completed a predicate eval=%X Op=%p\n",
 			  walk_state->control_state->common.value,
 			  walk_state->control_state->common.value,
 			  walk_state->op));
 			  walk_state->op));
 
 
 	/* Break to debugger to display result */
 	/* Break to debugger to display result */
 
 
-	ACPI_DEBUGGER_EXEC(acpi_db_display_result_object
-			   (local_obj_desc, walk_state));
+	acpi_db_display_result_object(local_obj_desc, walk_state);
 
 
 	/*
 	/*
 	 * Delete the predicate result object (we know that
 	 * Delete the predicate result object (we know that
@@ -264,8 +264,8 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
 	    (walk_state->control_state->common.state ==
 	    (walk_state->control_state->common.state ==
 	     ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
 	     ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
 		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Exec predicate Op=%p State=%p\n", op,
-				  walk_state));
+				  "Exec predicate Op=%p State=%p\n",
+				  op, walk_state));
 
 
 		walk_state->control_state->common.state =
 		walk_state->control_state->common.state =
 		    ACPI_CONTROL_PREDICATE_EXECUTING;
 		    ACPI_CONTROL_PREDICATE_EXECUTING;
@@ -386,11 +386,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
 
 
 	/* Call debugger for single step support (DEBUG build only) */
 	/* Call debugger for single step support (DEBUG build only) */
 
 
-	ACPI_DEBUGGER_EXEC(status =
-			   acpi_db_single_step(walk_state, op, op_class));
-	ACPI_DEBUGGER_EXEC(if (ACPI_FAILURE(status)) {
-			   return_ACPI_STATUS(status);}
-	) ;
+	status = acpi_db_single_step(walk_state, op, op_class);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
 
 
 	/* Decode the Opcode Class */
 	/* Decode the Opcode Class */
 
 
@@ -502,9 +501,8 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
 						  "Method Reference in a Package, Op=%p\n",
 						  "Method Reference in a Package, Op=%p\n",
 						  op));
 						  op));
 
 
-				op->common.node =
-				    (struct acpi_namespace_node *)op->asl.value.
-				    arg->asl.node;
+				op->common.node = (struct acpi_namespace_node *)
+				    op->asl.value.arg->asl.node;
 				acpi_ut_add_reference(op->asl.value.arg->asl.
 				acpi_ut_add_reference(op->asl.value.arg->asl.
 						      node->object);
 						      node->object);
 				return_ACPI_STATUS(AE_OK);
 				return_ACPI_STATUS(AE_OK);
@@ -586,8 +584,8 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
 				 * Put the Node on the object stack (Contains the ACPI Name
 				 * Put the Node on the object stack (Contains the ACPI Name
 				 * of this object)
 				 * of this object)
 				 */
 				 */
-				walk_state->operands[0] =
-				    (void *)op->common.parent->common.node;
+				walk_state->operands[0] = (void *)
+				    op->common.parent->common.node;
 				walk_state->num_operands = 1;
 				walk_state->num_operands = 1;
 
 
 				status = acpi_ds_create_node(walk_state,
 				status = acpi_ds_create_node(walk_state,
@@ -692,7 +690,8 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
 		default:
 		default:
 
 
 			ACPI_ERROR((AE_INFO,
 			ACPI_ERROR((AE_INFO,
-				    "Unimplemented opcode, class=0x%X type=0x%X Opcode=0x%X Op=%p",
+				    "Unimplemented opcode, class=0x%X "
+				    "type=0x%X Opcode=0x%X Op=%p",
 				    op_class, op_type, op->common.aml_opcode,
 				    op_class, op_type, op->common.aml_opcode,
 				    op));
 				    op));
 
 
@@ -728,8 +727,8 @@ cleanup:
 
 
 		/* Break to debugger to display result */
 		/* Break to debugger to display result */
 
 
-		ACPI_DEBUGGER_EXEC(acpi_db_display_result_object
-				   (walk_state->result_obj, walk_state));
+		acpi_db_display_result_object(walk_state->result_obj,
+					      walk_state);
 
 
 		/*
 		/*
 		 * Delete the result op if and only if:
 		 * Delete the result op if and only if:

+ 3 - 7
drivers/acpi/acpica/dswload.c

@@ -476,13 +476,9 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
 			status =
 			    acpi_ex_create_region(op->named.data,
 						  op->named.length,
-						  (acpi_adr_space_type) ((op->
-									  common.
-									  value.
-									  arg)->
-									 common.
-									 value.
-									 integer),
+						  (acpi_adr_space_type)
+						  ((op->common.value.arg)->
+						   common.value.integer),
 						  walk_state);
 			if (ACPI_FAILURE(status)) {
 				return_ACPI_STATUS(status);

+ 5 - 5
drivers/acpi/acpica/dswload2.c

@@ -598,11 +598,10 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
 				 * Executing a method: initialize the region and unlock
 				 * the interpreter
 				 */
-				status =
-				    acpi_ex_create_region(op->named.data,
-							  op->named.length,
-							  region_space,
-							  walk_state);
+				status = acpi_ex_create_region(op->named.data,
+							       op->named.length,
+							       region_space,
+							       walk_state);
 				if (ACPI_FAILURE(status)) {
 					return_ACPI_STATUS(status);
 				}
@@ -664,6 +663,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
 								  length,
 								  walk_state);
 				}
+
 				walk_state->operands[0] = NULL;
 				walk_state->num_operands = 0;


+ 1 - 0
drivers/acpi/acpica/dswscope.c

@@ -77,6 +77,7 @@ void acpi_ds_scope_stack_clear(struct acpi_walk_state *walk_state)
 				  "Popped object type (%s)\n",
 				  "Popped object type (%s)\n",
 				  acpi_ut_get_type_name(scope_info->common.
 				  acpi_ut_get_type_name(scope_info->common.
 							value)));
 							value)));
+
 		acpi_ut_delete_generic_state(scope_info);
 		acpi_ut_delete_generic_state(scope_info);
 	}
 	}
 }
 }

+ 1 - 1
drivers/acpi/acpica/evgpe.c

@@ -92,8 +92,8 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
 		ACPI_SET_BIT(gpe_register_info->enable_for_run,
 			     (u8)register_bit);
 	}
-	gpe_register_info->enable_mask = gpe_register_info->enable_for_run;

+	gpe_register_info->enable_mask = gpe_register_info->enable_for_run;
 	return_ACPI_STATUS(AE_OK);
 }


+ 1 - 0
drivers/acpi/acpica/evgpeblk.c

@@ -167,6 +167,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
 		if (gpe_block->next) {
 			gpe_block->next->previous = gpe_block->previous;
 		}
+
 		acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 	}


+ 1 - 0
drivers/acpi/acpica/evgpeutil.c

@@ -346,6 +346,7 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 					ACPI_FREE(notify);
 					notify = next;
 				}
+
 				gpe_event_info->dispatch.notify_list = NULL;
 				gpe_event_info->flags &=
 				    ~ACPI_GPE_DISPATCH_MASK;

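The evhandler.c section that follows introduces acpi_ev_find_region_handler() and reuses it both when displaying handlers and when installing one, so the duplicate-install check reduces to a single lookup: the same handler already present yields AE_SAME_HANDLER, a different one yields AE_ALREADY_EXISTS. A simplified sketch of that decision, with stand-in types and status values rather than the real ACPICA ones, is:

#include <stdio.h>

enum status { OK, SAME_HANDLER, ALREADY_EXISTS };

struct handler { unsigned char space_id; void *fn; struct handler *next; };

/* Find any handler already registered for this address space */
static struct handler *find_handler(unsigned char space_id, struct handler *head)
{
	for (; head; head = head->next)
		if (head->space_id == space_id)
			return head;
	return NULL;
}

/* Decide what installing (space_id, fn) should do given the current list */
static enum status install_handler(struct handler **head,
				   unsigned char space_id, void *fn)
{
	struct handler *existing = find_handler(space_id, *head);

	if (existing)
		return (existing->fn == fn) ? SAME_HANDLER : ALREADY_EXISTS;
	/* Real code allocates a handler object and links it at the list head */
	return OK;
}

int main(void)
{
	int dummy;
	struct handler pci = { 2, &dummy, NULL };
	struct handler *head = &pci;

	printf("%d\n", install_handler(&head, 2, &dummy));   /* SAME_HANDLER   */
	printf("%d\n", install_handler(&head, 2, &head));    /* ALREADY_EXISTS */
	printf("%d\n", install_handler(&head, 0, &dummy));   /* OK             */
	return 0;
}

Reinstalling the same handler is tolerated because it happens routinely for PCI_Config space, while a conflicting handler must be rejected to preserve the scoping rule.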
+ 95 - 70
drivers/acpi/acpica/evhandler.c

@@ -159,7 +159,7 @@ acpi_ev_has_default_handler(struct acpi_namespace_node *node,
 
 
 	obj_desc = acpi_ns_get_attached_object(node);
 	obj_desc = acpi_ns_get_attached_object(node);
 	if (obj_desc) {
 	if (obj_desc) {
-		handler_obj = obj_desc->device.handler;
+		handler_obj = obj_desc->common_notify.handler;
 
 
 		/* Walk the linked list of handlers for this object */
 		/* Walk the linked list of handlers for this object */
 
 
@@ -247,35 +247,31 @@ acpi_ev_install_handler(acpi_handle obj_handle,
 
 
 		/* Check if this Device already has a handler for this address space */
 		/* Check if this Device already has a handler for this address space */
 
 
-		next_handler_obj = obj_desc->device.handler;
-		while (next_handler_obj) {
+		next_handler_obj =
+		    acpi_ev_find_region_handler(handler_obj->address_space.
+						space_id,
+						obj_desc->common_notify.
+						handler);
+		if (next_handler_obj) {
 
 
 			/* Found a handler, is it for the same address space? */
 			/* Found a handler, is it for the same address space? */
 
 
-			if (next_handler_obj->address_space.space_id ==
-			    handler_obj->address_space.space_id) {
-				ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-						  "Found handler for region [%s] in device %p(%p) "
-						  "handler %p\n",
-						  acpi_ut_get_region_name
-						  (handler_obj->address_space.
-						   space_id), obj_desc,
-						  next_handler_obj,
-						  handler_obj));
-
-				/*
-				 * Since the object we found it on was a device, then it
-				 * means that someone has already installed a handler for
-				 * the branch of the namespace from this device on. Just
-				 * bail out telling the walk routine to not traverse this
-				 * branch. This preserves the scoping rule for handlers.
-				 */
-				return (AE_CTRL_DEPTH);
-			}
-
-			/* Walk the linked list of handlers attached to this device */
-
-			next_handler_obj = next_handler_obj->address_space.next;
+			ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+					  "Found handler for region [%s] in device %p(%p) handler %p\n",
+					  acpi_ut_get_region_name(handler_obj->
+								  address_space.
+								  space_id),
+					  obj_desc, next_handler_obj,
+					  handler_obj));
+
+			/*
+			 * Since the object we found it on was a device, then it means
+			 * that someone has already installed a handler for the branch
+			 * of the namespace from this device on. Just bail out telling
+			 * the walk routine to not traverse this branch. This preserves
+			 * the scoping rule for handlers.
+			 */
+			return (AE_CTRL_DEPTH);
 		}
 		}
 
 
 		/*
 		/*
@@ -307,6 +303,44 @@ acpi_ev_install_handler(acpi_handle obj_handle,
 	return (status);
 	return (status);
 }
 }
 
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_find_region_handler
+ *
+ * PARAMETERS:  space_id        - The address space ID
+ *              handler_obj     - Head of the handler object list
+ *
+ * RETURN:      Matching handler object. NULL if space ID not matched
+ *
+ * DESCRIPTION: Search a handler object list for a match on the address
+ *              space ID.
+ *
+ ******************************************************************************/
+
+union acpi_operand_object *acpi_ev_find_region_handler(acpi_adr_space_type
+						       space_id,
+						       union acpi_operand_object
+						       *handler_obj)
+{
+
+	/* Walk the handler list for this device */
+
+	while (handler_obj) {
+
+		/* Same space_id indicates a handler is installed */
+
+		if (handler_obj->address_space.space_id == space_id) {
+			return (handler_obj);
+		}
+
+		/* Next handler object */
+
+		handler_obj = handler_obj->address_space.next;
+	}
+
+	return (NULL);
+}
+
 /*******************************************************************************
 /*******************************************************************************
  *
  *
  * FUNCTION:    acpi_ev_install_space_handler
  * FUNCTION:    acpi_ev_install_space_handler
@@ -332,15 +366,15 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
 {
 {
 	union acpi_operand_object *obj_desc;
 	union acpi_operand_object *obj_desc;
 	union acpi_operand_object *handler_obj;
 	union acpi_operand_object *handler_obj;
-	acpi_status status;
+	acpi_status status = AE_OK;
 	acpi_object_type type;
 	acpi_object_type type;
 	u8 flags = 0;
 	u8 flags = 0;
 
 
 	ACPI_FUNCTION_TRACE(ev_install_space_handler);
 	ACPI_FUNCTION_TRACE(ev_install_space_handler);
 
 
 	/*
 	/*
-	 * This registration is valid for only the types below and the root. This
-	 * is where the default handlers get placed.
+	 * This registration is valid for only the types below and the root.
+	 * The root node is where the default handlers get installed.
 	 */
 	if ((node->type != ACPI_TYPE_DEVICE) &&
 	    (node->type != ACPI_TYPE_PROCESSOR) &&
@@ -407,38 +441,30 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
 	obj_desc = acpi_ns_get_attached_object(node);
 	if (obj_desc) {
 		/*
-		 * The attached device object already exists. Make sure the handler
-		 * is not already installed.
+		 * The attached device object already exists. Now make sure
+		 * the handler is not already installed.
 		 */
-		handler_obj = obj_desc->device.handler;
-
-		/* Walk the handler list for this device */
-
-		while (handler_obj) {
-
-			/* Same space_id indicates a handler already installed */
+		handler_obj = acpi_ev_find_region_handler(space_id,
+							  obj_desc->
+							  common_notify.
+							  handler);
 
-			if (handler_obj->address_space.space_id == space_id) {
-				if (handler_obj->address_space.handler ==
-				    handler) {
-					/*
-					 * It is (relatively) OK to attempt to install the SAME
-					 * handler twice. This can easily happen with the
-					 * PCI_Config space.
-					 */
-					status = AE_SAME_HANDLER;
-					goto unlock_and_exit;
-				} else {
-					/* A handler is already installed */
-
-					status = AE_ALREADY_EXISTS;
-				}
+		if (handler_obj) {
+			if (handler_obj->address_space.handler == handler) {
+				/*
+				 * It is (relatively) OK to attempt to install the SAME
+				 * handler twice. This can easily happen with the
+				 * PCI_Config space.
+				 */
+				status = AE_SAME_HANDLER;
 				goto unlock_and_exit;
 				goto unlock_and_exit;
-			}
+			} else {
+				/* A handler is already installed */
 
 
+				status = AE_ALREADY_EXISTS;
+			}
 
 
+			goto unlock_and_exit;
 		}
 		}
 	} else {
 		ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
 	}
 	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+			  "Installing address handler for region %s(%X) "
+			  "on Device %4.4s %p(%p)\n",
 			  acpi_ut_get_region_name(space_id), space_id,
 			  acpi_ut_get_region_name(space_id), space_id,
 			  acpi_ut_get_node_name(node), node, obj_desc));
 
 
 
 	/* Install at head of Device.address_space list */
 
+	handler_obj->address_space.next = obj_desc->common_notify.handler;
 
 
 	/*
 	 * The Device object is the first reference on the handler_obj.
 	 * Each region that uses the handler adds a reference.
 	 */
+	obj_desc->common_notify.handler = handler_obj;
 
 
 	/*
-	 * handler will manage.
+	 * Walk the namespace finding all of the regions this handler will
+	 * manage.
 	 *
 	 *
-	 * Start at the device and search the branch toward
-	 * the leaf nodes until either the leaf is encountered or
-	 * a device is detected that has an address handler of the
-	 * same type.
+	 * Start at the device and search the branch toward the leaf nodes
+	 * until either the leaf is encountered or a device is detected that
+	 * has an address handler of the same type.
 	 *
 	 *
-	 * In either case, back up and search down the remainder
-	 * of the branch
+	 * In either case, back up and search down the remainder of the branch
 	 */
 	 */
-	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
-					ACPI_NS_WALK_UNLOCK,
+	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node,
+					ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
 					acpi_ev_install_handler, NULL,
 					acpi_ev_install_handler, NULL,
 					handler_obj, NULL);
 

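The new acpi_ev_find_region_handler() helper added above centralizes the handler-list walk that acpi_ev_install_space_handler() (and, later in this commit, acpi_ev_initialize_region()) used to open-code. A minimal caller-side sketch, assuming the usual ACPICA headers; the wrapper name check_space_handler is illustrative only:

/* Sketch: is a handler for this space ID already installed on a device? */
static acpi_status check_space_handler(union acpi_operand_object *obj_desc,
				       acpi_adr_space_type space_id)
{
	union acpi_operand_object *handler_obj;

	/* One call replaces the old open-coded walk of address_space.next */
	handler_obj = acpi_ev_find_region_handler(space_id,
						  obj_desc->common_notify.handler);

	return (handler_obj ? AE_ALREADY_EXISTS : AE_OK);
}
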
+ 3 - 2
drivers/acpi/acpica/evmisc.c

@@ -68,6 +68,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
 
 u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node)
 {
+
 	switch (node->type) {
 	case ACPI_TYPE_DEVICE:
 	case ACPI_TYPE_PROCESSOR:
@@ -170,8 +171,8 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
 			  acpi_ut_get_notify_name(notify_value, ACPI_TYPE_ANY),
 			  node));
 
-	status = acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
-				 info);
+	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
+				 acpi_ev_notify_dispatch, info);
 	if (ACPI_FAILURE(status)) {
 		acpi_ut_delete_generic_state(info);
 	}

+ 92 - 22
drivers/acpi/acpica/evregion.c

@@ -97,15 +97,12 @@ acpi_status acpi_ev_initialize_op_regions(void)
 		if (acpi_ev_has_default_handler(acpi_gbl_root_node,
 		if (acpi_ev_has_default_handler(acpi_gbl_root_node,
 						acpi_gbl_default_address_spaces
 						acpi_gbl_default_address_spaces
 						[i])) {
 						[i])) {
-			status =
-			    acpi_ev_execute_reg_methods(acpi_gbl_root_node,
-							acpi_gbl_default_address_spaces
-							[i]);
+			acpi_ev_execute_reg_methods(acpi_gbl_root_node,
+						    acpi_gbl_default_address_spaces
+						    [i], ACPI_REG_CONNECT);
 		}
 		}
 	}
 	}
 
 
-	acpi_gbl_reg_methods_executed = TRUE;
-
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return_ACPI_STATUS(status);
 	return_ACPI_STATUS(status);
 }
 }
@@ -127,6 +124,12 @@ acpi_status acpi_ev_initialize_op_regions(void)
  * DESCRIPTION: Dispatch an address space or operation region access to
  * DESCRIPTION: Dispatch an address space or operation region access to
  *              a previously installed handler.
  *              a previously installed handler.
  *
  *
+ * NOTE: During early initialization, we always install the default region
+ * handlers for Memory, I/O and PCI_Config. This ensures that these operation
+ * region address spaces are always available as per the ACPI specification.
+ * This is especially needed in order to support the execution of
+ * module-level AML code during loading of the ACPI tables.
+ *
  ******************************************************************************/
  ******************************************************************************/
 
 
 acpi_status
 acpi_status
@@ -498,6 +501,12 @@ acpi_ev_attach_region(union acpi_operand_object *handler_obj,
 
 
 	ACPI_FUNCTION_TRACE(ev_attach_region);
 	ACPI_FUNCTION_TRACE(ev_attach_region);
 
 
+	/* Install the region's handler */
+
+	if (region_obj->region.handler) {
+		return_ACPI_STATUS(AE_ALREADY_EXISTS);
+	}
+
 	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
 	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
 			  "Adding Region [%4.4s] %p to address handler %p [%s]\n",
 			  "Adding Region [%4.4s] %p to address handler %p [%s]\n",
 			  acpi_ut_get_node_name(region_obj->region.node),
 			  acpi_ut_get_node_name(region_obj->region.node),
@@ -509,17 +518,56 @@ acpi_ev_attach_region(union acpi_operand_object *handler_obj,
 
 
 	region_obj->region.next = handler_obj->address_space.region_list;
 	region_obj->region.next = handler_obj->address_space.region_list;
 	handler_obj->address_space.region_list = region_obj;
 	handler_obj->address_space.region_list = region_obj;
+	region_obj->region.handler = handler_obj;
+	acpi_ut_add_reference(handler_obj);
 
 
-	/* Install the region's handler */
+	return_ACPI_STATUS(AE_OK);
+}
 
 
-	if (region_obj->region.handler) {
-		return_ACPI_STATUS(AE_ALREADY_EXISTS);
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_associate_reg_method
+ *
+ * PARAMETERS:  region_obj          - Region object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Find and associate _REG method to a region
+ *
+ ******************************************************************************/
+
+void acpi_ev_associate_reg_method(union acpi_operand_object *region_obj)
+{
+	acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG;
+	struct acpi_namespace_node *method_node;
+	struct acpi_namespace_node *node;
+	union acpi_operand_object *region_obj2;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_associate_reg_method);
+
+	region_obj2 = acpi_ns_get_secondary_object(region_obj);
+	if (!region_obj2) {
+		return_VOID;
 	}
 	}
 
 
-	region_obj->region.handler = handler_obj;
-	acpi_ut_add_reference(handler_obj);
+	node = region_obj->region.node->parent;
 
 
-	return_ACPI_STATUS(AE_OK);
+	/* Find any "_REG" method associated with this region definition */
+
+	status =
+	    acpi_ns_search_one_scope(*reg_name_ptr, node, ACPI_TYPE_METHOD,
+				     &method_node);
+	if (ACPI_SUCCESS(status)) {
+		/*
+		 * The _REG method is optional and there can be only one per region
+		 * definition. This will be executed when the handler is attached
+		 * or removed
+		 */
+		region_obj2->extra.method_REG = method_node;
+	}
+
+	return_VOID;
 }
 }
 
 
 /*******************************************************************************
 /*******************************************************************************
@@ -550,7 +598,18 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
 		return_ACPI_STATUS(AE_NOT_EXIST);
 		return_ACPI_STATUS(AE_NOT_EXIST);
 	}
 	}
 
 
-	if (region_obj2->extra.method_REG == NULL) {
+	if (region_obj2->extra.method_REG == NULL ||
+	    region_obj->region.handler == NULL ||
+	    !acpi_gbl_reg_methods_enabled) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* _REG(DISCONNECT) should be paired with _REG(CONNECT) */
+
+	if ((function == ACPI_REG_CONNECT &&
+	     region_obj->common.flags & AOPOBJ_REG_CONNECTED) ||
+	    (function == ACPI_REG_DISCONNECT &&
+	     !(region_obj->common.flags & AOPOBJ_REG_CONNECTED))) {
 		return_ACPI_STATUS(AE_OK);
 		return_ACPI_STATUS(AE_OK);
 	}
 	}
 
 
@@ -599,6 +658,16 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
 	status = acpi_ns_evaluate(info);
 	status = acpi_ns_evaluate(info);
 	acpi_ut_remove_reference(args[1]);
 	acpi_ut_remove_reference(args[1]);
 
 
+	if (ACPI_FAILURE(status)) {
+		goto cleanup2;
+	}
+
+	if (function == ACPI_REG_CONNECT) {
+		region_obj->common.flags |= AOPOBJ_REG_CONNECTED;
+	} else {
+		region_obj->common.flags &= ~AOPOBJ_REG_CONNECTED;
+	}
+
 cleanup2:
 cleanup2:
 	acpi_ut_remove_reference(args[0]);
 	acpi_ut_remove_reference(args[0]);
 
 
@@ -613,24 +682,25 @@ cleanup1:
  *
  *
  * PARAMETERS:  node            - Namespace node for the device
  * PARAMETERS:  node            - Namespace node for the device
  *              space_id        - The address space ID
  *              space_id        - The address space ID
+ *              function        - Passed to _REG: On (1) or Off (0)
  *
  *
- * RETURN:      Status
+ * RETURN:      None
  *
  *
  * DESCRIPTION: Run all _REG methods for the input Space ID;
  * DESCRIPTION: Run all _REG methods for the input Space ID;
  *              Note: assumes namespace is locked, or system init time.
  *              Note: assumes namespace is locked, or system init time.
  *
  *
  ******************************************************************************/
  ******************************************************************************/
 
 
-acpi_status
+void
 acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
 acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
-			    acpi_adr_space_type space_id)
+			    acpi_adr_space_type space_id, u32 function)
 {
 {
-	acpi_status status;
 	struct acpi_reg_walk_info info;
 	struct acpi_reg_walk_info info;
 
 
 	ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
 	ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
 
 
 	info.space_id = space_id;
 	info.space_id = space_id;
+	info.function = function;
 	info.reg_run_count = 0;
 	info.reg_run_count = 0;
 
 
 	ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES,
 	ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES,
@@ -643,9 +713,9 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
 	 * regions and _REG methods. (i.e. handlers must be installed for all
 	 * regions and _REG methods. (i.e. handlers must be installed for all
 	 * regions of this Space ID before we can run any _REG methods)
 	 * regions of this Space ID before we can run any _REG methods)
 	 */
 	 */
-	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
-					ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
-					NULL, &info, NULL);
+	(void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
+				     ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, NULL,
+				     &info, NULL);
 
 
 	/* Special case for EC: handle "orphan" _REG methods with no region */
 	/* Special case for EC: handle "orphan" _REG methods with no region */
 
 
@@ -658,7 +728,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
 			      info.reg_run_count,
 			      info.reg_run_count,
 			      acpi_ut_get_region_name(info.space_id)));
 			      acpi_ut_get_region_name(info.space_id)));
 
 
-	return_ACPI_STATUS(status);
+	return_VOID;
 }
 }
 
 
 /*******************************************************************************
 /*******************************************************************************
@@ -717,7 +787,7 @@ acpi_ev_reg_run(acpi_handle obj_handle,
 	}
 	}
 
 
 	info->reg_run_count++;
 	info->reg_run_count++;
-	status = acpi_ev_execute_reg_method(obj_desc, ACPI_REG_CONNECT);
+	status = acpi_ev_execute_reg_method(obj_desc, info->function);
 	return (status);
 	return (status);
 }
 }
 
 

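The pairing check added to acpi_ev_execute_reg_method() above reduces to a guard on the AOPOBJ_REG_CONNECTED flag: _REG(CONNECT) runs only on a disconnected region and _REG(DISCONNECT) only on a connected one. A condensed sketch of that guard; the helper name reg_method_should_run is illustrative, not part of the change:

/* Sketch: should _REG run for this region and function value? */
static u8 reg_method_should_run(union acpi_operand_object *region_obj,
				u32 function)
{
	u8 connected =
	    (region_obj->common.flags & AOPOBJ_REG_CONNECTED) ? TRUE : FALSE;

	/* Only toggle state: CONNECT when disconnected, DISCONNECT when connected */
	return ((function == ACPI_REG_CONNECT) ? !connected : connected);
}
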
+ 35 - 80
drivers/acpi/acpica/evrgnini.c

@@ -507,9 +507,6 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
 	acpi_adr_space_type space_id;
 	acpi_adr_space_type space_id;
 	struct acpi_namespace_node *node;
 	struct acpi_namespace_node *node;
 	acpi_status status;
 	acpi_status status;
-	struct acpi_namespace_node *method_node;
-	acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG;
-	union acpi_operand_object *region_obj2;
 
 
 	ACPI_FUNCTION_TRACE_U32(ev_initialize_region, acpi_ns_locked);
 	ACPI_FUNCTION_TRACE_U32(ev_initialize_region, acpi_ns_locked);
 
 
@@ -521,38 +518,15 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
 		return_ACPI_STATUS(AE_OK);
 		return_ACPI_STATUS(AE_OK);
 	}
 	}
 
 
-	region_obj2 = acpi_ns_get_secondary_object(region_obj);
-	if (!region_obj2) {
-		return_ACPI_STATUS(AE_NOT_EXIST);
-	}
+	acpi_ev_associate_reg_method(region_obj);
+	region_obj->common.flags |= AOPOBJ_OBJECT_INITIALIZED;
 
 
 	node = region_obj->region.node->parent;
 	node = region_obj->region.node->parent;
 	space_id = region_obj->region.space_id;
 	space_id = region_obj->region.space_id;
 
 
-	/* Setup defaults */
-
-	region_obj->region.handler = NULL;
-	region_obj2->extra.method_REG = NULL;
-	region_obj->common.flags &= ~(AOPOBJ_SETUP_COMPLETE);
-	region_obj->common.flags |= AOPOBJ_OBJECT_INITIALIZED;
-
-	/* Find any "_REG" method associated with this region definition */
-
-	status =
-	    acpi_ns_search_one_scope(*reg_name_ptr, node, ACPI_TYPE_METHOD,
-				     &method_node);
-	if (ACPI_SUCCESS(status)) {
-		/*
-		 * The _REG method is optional and there can be only one per region
-		 * definition. This will be executed when the handler is attached
-		 * or removed
-		 */
-		region_obj2->extra.method_REG = method_node;
-	}
-
 	/*
 	/*
 	 * The following loop depends upon the root Node having no parent
 	 * The following loop depends upon the root Node having no parent
-	 * ie: acpi_gbl_root_node->parent_entry being set to NULL
+	 * ie: acpi_gbl_root_node->Parent being set to NULL
 	 */
 	 */
 	while (node) {
 	while (node) {
 
 
@@ -566,18 +540,10 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
 
 
 			switch (node->type) {
 			switch (node->type) {
 			case ACPI_TYPE_DEVICE:
 			case ACPI_TYPE_DEVICE:
-
-				handler_obj = obj_desc->device.handler;
-				break;
-
 			case ACPI_TYPE_PROCESSOR:
 			case ACPI_TYPE_PROCESSOR:
-
-				handler_obj = obj_desc->processor.handler;
-				break;
-
 			case ACPI_TYPE_THERMAL:
 			case ACPI_TYPE_THERMAL:
 
 
-				handler_obj = obj_desc->thermal_zone.handler;
+				handler_obj = obj_desc->common_notify.handler;
 				break;
 				break;
 
 
 			case ACPI_TYPE_METHOD:
 			case ACPI_TYPE_METHOD:
@@ -602,60 +568,49 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
 				break;
 				break;
 			}
 			}
 
 
-			while (handler_obj) {
-
-				/* Is this handler of the correct type? */
+			handler_obj =
+			    acpi_ev_find_region_handler(space_id, handler_obj);
+			if (handler_obj) {
 
 
-				if (handler_obj->address_space.space_id ==
-				    space_id) {
+				/* Found correct handler */
 
 
-					/* Found correct handler */
+				ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+						  "Found handler %p for region %p in obj %p\n",
+						  handler_obj, region_obj,
+						  obj_desc));
 
 
-					ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-							  "Found handler %p for region %p in obj %p\n",
-							  handler_obj,
+				status =
+				    acpi_ev_attach_region(handler_obj,
 							  region_obj,
 							  region_obj,
-							  obj_desc));
+							  acpi_ns_locked);
 
 
+				/*
+				 * Tell all users that this region is usable by
+				 * running the _REG method
+				 */
+				if (acpi_ns_locked) {
 					status =
 					status =
-					    acpi_ev_attach_region(handler_obj,
-								  region_obj,
-								  acpi_ns_locked);
-
-					/*
-					 * Tell all users that this region is usable by
-					 * running the _REG method
-					 */
-					if (acpi_ns_locked) {
-						status =
-						    acpi_ut_release_mutex
-						    (ACPI_MTX_NAMESPACE);
-						if (ACPI_FAILURE(status)) {
-							return_ACPI_STATUS
-							    (status);
-						}
+					    acpi_ut_release_mutex
+					    (ACPI_MTX_NAMESPACE);
+					if (ACPI_FAILURE(status)) {
+						return_ACPI_STATUS(status);
 					}
 					}
+				}
 
 
+				status =
+				    acpi_ev_execute_reg_method(region_obj,
+							       ACPI_REG_CONNECT);
+
+				if (acpi_ns_locked) {
 					status =
 					status =
-					    acpi_ev_execute_reg_method
-					    (region_obj, ACPI_REG_CONNECT);
-
-					if (acpi_ns_locked) {
-						status =
-						    acpi_ut_acquire_mutex
-						    (ACPI_MTX_NAMESPACE);
-						if (ACPI_FAILURE(status)) {
-							return_ACPI_STATUS
-							    (status);
-						}
+					    acpi_ut_acquire_mutex
+					    (ACPI_MTX_NAMESPACE);
+					if (ACPI_FAILURE(status)) {
+						return_ACPI_STATUS(status);
 					}
 					}
-
-					return_ACPI_STATUS(AE_OK);
 				}
 				}
 
 
-				/* Try next handler in the list */
-
-				handler_obj = handler_obj->address_space.next;
+				return_ACPI_STATUS(AE_OK);
 			}
 			}
 		}
 		}
 
 

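The per-type handler lookups removed above (device.handler, processor.handler, thermal_zone.handler) collapse into a single common_notify.handler access because those operand-object variants share the same leading notify fields. A sketch of that assumption; the accessor name is purely illustrative, and callers must still restrict it to notify-capable object types:

/* Sketch: one accessor covers Device, Processor and ThermalZone objects */
static union acpi_operand_object *get_space_handler_list(union acpi_operand_object
							  *obj_desc)
{
	return (obj_desc ? obj_desc->common_notify.handler : NULL);
}
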
+ 4 - 5
drivers/acpi/acpica/evxface.c

@@ -879,9 +879,8 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
 
 
 	ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);
 	ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);
 
 
-	status =
-	    acpi_ev_install_gpe_handler(gpe_device, gpe_number, type, FALSE,
-					address, context);
+	status = acpi_ev_install_gpe_handler(gpe_device, gpe_number, type,
+					     FALSE, address, context);
 
 
 	return_ACPI_STATUS(status);
 	return_ACPI_STATUS(status);
 }
 }
@@ -914,8 +913,8 @@ acpi_install_gpe_raw_handler(acpi_handle gpe_device,
 
 
 	ACPI_FUNCTION_TRACE(acpi_install_gpe_raw_handler);
 	ACPI_FUNCTION_TRACE(acpi_install_gpe_raw_handler);
 
 
-	status = acpi_ev_install_gpe_handler(gpe_device, gpe_number, type, TRUE,
-					     address, context);
+	status = acpi_ev_install_gpe_handler(gpe_device, gpe_number, type,
+					     TRUE, address, context);
 
 
 	return_ACPI_STATUS(status);
 	return_ACPI_STATUS(status);
 }
 }

+ 3 - 35
drivers/acpi/acpica/evxfregn.c

@@ -112,41 +112,9 @@ acpi_install_address_space_handler(acpi_handle device,
 		goto unlock_and_exit;
 		goto unlock_and_exit;
 	}
 	}
 
 
-	/*
-	 * For the default space_IDs, (the IDs for which there are default region handlers
-	 * installed) Only execute the _REG methods if the global initialization _REG
-	 * methods have already been run (via acpi_initialize_objects). In other words,
-	 * we will defer the execution of the _REG methods for these space_IDs until
-	 * execution of acpi_initialize_objects. This is done because we need the handlers
-	 * for the default spaces (mem/io/pci/table) to be installed before we can run
-	 * any control methods (or _REG methods). There is known BIOS code that depends
-	 * on this.
-	 *
-	 * For all other space_IDs, we can safely execute the _REG methods immediately.
-	 * This means that for IDs like embedded_controller, this function should be called
-	 * only after acpi_enable_subsystem has been called.
-	 */
-	switch (space_id) {
-	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-	case ACPI_ADR_SPACE_SYSTEM_IO:
-	case ACPI_ADR_SPACE_PCI_CONFIG:
-	case ACPI_ADR_SPACE_DATA_TABLE:
-
-		if (!acpi_gbl_reg_methods_executed) {
-
-			/* We will defer execution of the _REG methods for this space */
-			goto unlock_and_exit;
-		}
-		break;
-
-	default:
-
-		break;
-	}
-
 	/* Run all _REG methods for this address space */
 	/* Run all _REG methods for this address space */
 
 
-	status = acpi_ev_execute_reg_methods(node, space_id);
+	acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT);
 
 
 unlock_and_exit:
 unlock_and_exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
@@ -215,8 +183,8 @@ acpi_remove_address_space_handler(acpi_handle device,
 
 
 	/* Find the address handler the user requested */
 	/* Find the address handler the user requested */
 
 
-	handler_obj = obj_desc->device.handler;
-	last_obj_ptr = &obj_desc->device.handler;
+	handler_obj = obj_desc->common_notify.handler;
+	last_obj_ptr = &obj_desc->common_notify.handler;
 	while (handler_obj) {
 	while (handler_obj) {
 
 
 		/* We have a handler, see if user requested this one */
 		/* We have a handler, see if user requested this one */

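With the deferral switch removed above, acpi_install_address_space_handler() now always runs the _REG methods for the requested space ID with ACPI_REG_CONNECT before returning. A hedged sketch of a typical driver-side call; the handler, setup and context arguments are placeholders, not names from this commit:

/* Sketch: install a custom handler for the EC address space */
static acpi_status install_ec_handler(acpi_handle device_handle,
				      acpi_adr_space_handler space_handler,
				      acpi_adr_space_setup space_setup,
				      void *driver_context)
{
	/* _REG(CONNECT) methods for this space now run before this returns */
	return (acpi_install_address_space_handler(device_handle,
						   ACPI_ADR_SPACE_EC,
						   space_handler,
						   space_setup, driver_context));
}
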
+ 4 - 4
drivers/acpi/acpica/exconfig.c

@@ -358,8 +358,8 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
 		}
 		}
 
 
 		/*
 		/*
-		 * If the Region Address and Length have not been previously evaluated,
-		 * evaluate them now and save the results.
+		 * If the Region Address and Length have not been previously
+		 * evaluated, evaluate them now and save the results.
 		 */
 		 */
 		if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
 		if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
 			status = acpi_ds_get_region_arguments(obj_desc);
 			status = acpi_ds_get_region_arguments(obj_desc);
@@ -454,8 +454,8 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
 		}
 		}
 
 
 		/*
 		/*
-		 * Copy the table from the buffer because the buffer could be modified
-		 * or even deleted in the future
+		 * Copy the table from the buffer because the buffer could be
+		 * modified or even deleted in the future
 		 */
 		 */
 		table = ACPI_ALLOCATE(length);
 		table = ACPI_ALLOCATE(length);
 		if (!table) {
 		if (!table) {

+ 4 - 5
drivers/acpi/acpica/exconvrt.c

@@ -227,8 +227,8 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
 		/* Copy the integer to the buffer, LSB first */
 		/* Copy the integer to the buffer, LSB first */
 
 
 		new_buf = return_desc->buffer.pointer;
 		new_buf = return_desc->buffer.pointer;
-		memcpy(new_buf,
-		       &obj_desc->integer.value, acpi_gbl_integer_byte_width);
+		memcpy(new_buf, &obj_desc->integer.value,
+		       acpi_gbl_integer_byte_width);
 		break;
 		break;
 
 
 	case ACPI_TYPE_STRING:
 	case ACPI_TYPE_STRING:
@@ -354,9 +354,8 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width)
 
 
 			/* Get one hex digit, most significant digits first */
 			/* Get one hex digit, most significant digits first */
 
 
-			string[k] =
-			    (u8) acpi_ut_hex_to_ascii_char(integer,
-							   ACPI_MUL_4(j));
+			string[k] = (u8)
+			    acpi_ut_hex_to_ascii_char(integer, ACPI_MUL_4(j));
 			k++;
 			k++;
 		}
 		}
 		break;
 		break;

+ 12 - 8
drivers/acpi/acpica/excreate.c

@@ -189,9 +189,9 @@ acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state)
 
 
 	/* Attach object to the Node */
 	/* Attach object to the Node */
 
 
-	status =
-	    acpi_ns_attach_object((struct acpi_namespace_node *)walk_state->
-				  operands[0], obj_desc, ACPI_TYPE_EVENT);
+	status = acpi_ns_attach_object((struct acpi_namespace_node *)
+				       walk_state->operands[0], obj_desc,
+				       ACPI_TYPE_EVENT);
 
 
 cleanup:
 cleanup:
 	/*
 	/*
@@ -326,9 +326,10 @@ acpi_ex_create_region(u8 * aml_start,
 	 * Remember location in AML stream of address & length
 	 * Remember location in AML stream of address & length
 	 * operands since they need to be evaluated at run time.
 	 * operands since they need to be evaluated at run time.
 	 */
 	 */
-	region_obj2 = obj_desc->common.next_object;
+	region_obj2 = acpi_ns_get_secondary_object(obj_desc);
 	region_obj2->extra.aml_start = aml_start;
 	region_obj2->extra.aml_start = aml_start;
 	region_obj2->extra.aml_length = aml_length;
 	region_obj2->extra.aml_length = aml_length;
+	region_obj2->extra.method_REG = NULL;
 	if (walk_state->scope_info) {
 	if (walk_state->scope_info) {
 		region_obj2->extra.scope_node =
 		region_obj2->extra.scope_node =
 		    walk_state->scope_info->scope.node;
 		    walk_state->scope_info->scope.node;
@@ -342,6 +343,10 @@ acpi_ex_create_region(u8 * aml_start,
 	obj_desc->region.address = 0;
 	obj_desc->region.address = 0;
 	obj_desc->region.length = 0;
 	obj_desc->region.length = 0;
 	obj_desc->region.node = node;
 	obj_desc->region.node = node;
+	obj_desc->region.handler = NULL;
+	obj_desc->common.flags &=
+	    ~(AOPOBJ_SETUP_COMPLETE | AOPOBJ_REG_CONNECTED |
+	      AOPOBJ_OBJECT_INITIALIZED);
 
 
 	/* Install the new region object in the parent Node */
 	/* Install the new region object in the parent Node */
 
 
@@ -492,10 +497,9 @@ acpi_ex_create_method(u8 * aml_start,
 	 * Disassemble the method flags. Split off the arg_count, Serialized
 	 * Disassemble the method flags. Split off the arg_count, Serialized
 	 * flag, and sync_level for efficiency.
 	 * flag, and sync_level for efficiency.
 	 */
 	 */
-	method_flags = (u8) operand[1]->integer.value;
-
-	obj_desc->method.param_count =
-	    (u8) (method_flags & AML_METHOD_ARG_COUNT);
+	method_flags = (u8)operand[1]->integer.value;
+	obj_desc->method.param_count = (u8)
+	    (method_flags & AML_METHOD_ARG_COUNT);
 
 
 	/*
 	/*
 	 * Get the sync_level. If method is serialized, a mutex will be
 	 * Get the sync_level. If method is serialized, a mutex will be

+ 56 - 347
drivers/acpi/acpica/exdebug.c

@@ -43,21 +43,11 @@
 
 
 #include <acpi/acpi.h>
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "accommon.h"
-#include "acnamesp.h"
 #include "acinterp.h"
 #include "acinterp.h"
-#include "acparser.h"
 
 
 #define _COMPONENT          ACPI_EXECUTER
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exdebug")
 ACPI_MODULE_NAME("exdebug")
 
 
-static union acpi_operand_object *acpi_gbl_trace_method_object = NULL;
-
-/* Local prototypes */
-
-#ifdef ACPI_DEBUG_OUTPUT
-static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type);
-#endif
-
 #ifndef ACPI_NO_ERROR_MESSAGES
 #ifndef ACPI_NO_ERROR_MESSAGES
 /*******************************************************************************
 /*******************************************************************************
  *
  *
@@ -80,7 +70,6 @@ static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type);
  * enabled if necessary.
  * enabled if necessary.
  *
  *
  ******************************************************************************/
  ******************************************************************************/
-
 void
 void
 acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
 acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
 			u32 level, u32 index)
 			u32 level, u32 index)
@@ -99,20 +88,40 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
 		return_VOID;
 		return_VOID;
 	}
 	}
 
 
-	/*
-	 * We will emit the current timer value (in microseconds) with each
-	 * debug output. Only need the lower 26 bits. This allows for 67
-	 * million microseconds or 67 seconds before rollover.
-	 */
-	timer = ((u32)acpi_os_get_timer() / 10);	/* (100 nanoseconds to microseconds) */
-	timer &= 0x03FFFFFF;
+	/* Null string or newline -- don't emit the line header */
+
+	if (source_desc &&
+	    (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) &&
+	    (source_desc->common.type == ACPI_TYPE_STRING)) {
+		if ((source_desc->string.length == 0) ||
+		    ((source_desc->string.length == 1) &&
+		     (*source_desc->string.pointer == '\n'))) {
+			acpi_os_printf("\n");
+			return_VOID;
+		}
+	}
 
 
 	/*
 	/*
 	 * Print line header as long as we are not in the middle of an
 	 * Print line header as long as we are not in the middle of an
 	 * object display
 	 * object display
 	 */
 	 */
 	if (!((level > 0) && index == 0)) {
 	if (!((level > 0) && index == 0)) {
-		acpi_os_printf("[ACPI Debug %.8u] %*s", timer, level, " ");
+		if (acpi_gbl_display_debug_timer) {
+			/*
+			 * We will emit the current timer value (in microseconds) with each
+			 * debug output. Only need the lower 26 bits. This allows for 67
+			 * million microseconds or 67 seconds before rollover.
+			 *
+			 * Convert 100 nanosecond units to microseconds
+			 */
+			timer = ((u32)acpi_os_get_timer() / 10);
+			timer &= 0x03FFFFFF;
+
+			acpi_os_printf("[ACPI Debug T=0x%8.8X] %*s", timer,
+				       level, " ");
+		} else {
+			acpi_os_printf("[ACPI Debug] %*s", level, " ");
+		}
 	}
 	}
 
 
 	/* Display the index for package output only */
 	/* Display the index for package output only */
@@ -127,8 +136,15 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
 	}
 	}
 
 
 	if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
 	if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
-		acpi_os_printf("%s ",
-			       acpi_ut_get_object_type_name(source_desc));
+
+		/* No object type prefix needed for integers and strings */
+
+		if ((source_desc->common.type != ACPI_TYPE_INTEGER) &&
+		    (source_desc->common.type != ACPI_TYPE_STRING)) {
+			acpi_os_printf("%s ",
+				       acpi_ut_get_object_type_name
+				       (source_desc));
+		}
 
 
 		if (!acpi_ut_valid_internal_object(source_desc)) {
 		if (!acpi_ut_valid_internal_object(source_desc)) {
 			acpi_os_printf("%p, Invalid Internal Object!\n",
 			acpi_os_printf("%p, Invalid Internal Object!\n",
@@ -137,7 +153,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
 		}
 		}
 	} else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
 	} else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
 		   ACPI_DESC_TYPE_NAMED) {
 		   ACPI_DESC_TYPE_NAMED) {
-		acpi_os_printf("%s: %p\n",
+		acpi_os_printf("%s (Node %p)\n",
 			       acpi_ut_get_type_name(((struct
 			       acpi_ut_get_type_name(((struct
 						       acpi_namespace_node *)
 						       acpi_namespace_node *)
 						      source_desc)->type),
 						      source_desc)->type),
@@ -175,14 +191,12 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
 
 
 	case ACPI_TYPE_STRING:
 	case ACPI_TYPE_STRING:
 
 
-		acpi_os_printf("[0x%.2X] \"%s\"\n",
-			       source_desc->string.length,
-			       source_desc->string.pointer);
+		acpi_os_printf("\"%s\"\n", source_desc->string.pointer);
 		break;
 		break;
 
 
 	case ACPI_TYPE_PACKAGE:
 	case ACPI_TYPE_PACKAGE:
 
 
-		acpi_os_printf("[Contains 0x%.2X Elements]\n",
+		acpi_os_printf("(Contains 0x%.2X Elements):\n",
 			       source_desc->package.count);
 			       source_desc->package.count);
 
 
 		/* Output the entire contents of the package */
 		/* Output the entire contents of the package */
@@ -261,11 +275,14 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
 			if (ACPI_GET_DESCRIPTOR_TYPE
 			if (ACPI_GET_DESCRIPTOR_TYPE
 			    (source_desc->reference.object) ==
 			    (source_desc->reference.object) ==
 			    ACPI_DESC_TYPE_NAMED) {
 			    ACPI_DESC_TYPE_NAMED) {
-				acpi_ex_do_debug_object(((struct
-							  acpi_namespace_node *)
+
+				/* Reference object is a namespace node */
+
+				acpi_ex_do_debug_object(ACPI_CAST_PTR
+							(union
+							 acpi_operand_object,
 							 source_desc->reference.
 							 source_desc->reference.
-							 object)->object,
-							level + 4, 0);
+							 object), level + 4, 0);
 			} else {
 			} else {
 				object_desc = source_desc->reference.object;
 				object_desc = source_desc->reference.object;
 				value = source_desc->reference.value;
 				value = source_desc->reference.value;
@@ -293,9 +310,14 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
 				case ACPI_TYPE_PACKAGE:
 				case ACPI_TYPE_PACKAGE:
 
 
 					acpi_os_printf("Package[%u] = ", value);
 					acpi_os_printf("Package[%u] = ", value);
-					acpi_ex_do_debug_object(*source_desc->
-								reference.where,
-								level + 4, 0);
+					if (!(*source_desc->reference.where)) {
+						acpi_os_printf
+						    ("[Uninitialized Package Element]\n");
+					} else {
+						acpi_ex_do_debug_object
+						    (*source_desc->reference.
+						     where, level + 4, 0);
+					}
 					break;
 					break;
 
 
 				default:
 				default:
@@ -311,7 +333,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
 
 
 	default:
 	default:
 
 
-		acpi_os_printf("%p\n", source_desc);
+		acpi_os_printf("(Descriptor %p)\n", source_desc);
 		break;
 		break;
 	}
 	}
 
 
@@ -319,316 +341,3 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
 	return_VOID;
 	return_VOID;
 }
 }
 #endif
 #endif
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_interpreter_trace_enabled
- *
- * PARAMETERS:  name                - Whether method name should be matched,
- *                                    this should be checked before starting
- *                                    the tracer
- *
- * RETURN:      TRUE if interpreter trace is enabled.
- *
- * DESCRIPTION: Check whether interpreter trace is enabled
- *
- ******************************************************************************/
-
-static u8 acpi_ex_interpreter_trace_enabled(char *name)
-{
-
-	/* Check if tracing is enabled */
-
-	if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED)) {
-		return (FALSE);
-	}
-
-	/*
-	 * Check if tracing is filtered:
-	 *
-	 * 1. If the tracer is started, acpi_gbl_trace_method_object should have
-	 *    been filled by the trace starter
-	 * 2. If the tracer is not started, acpi_gbl_trace_method_name should be
-	 *    matched if it is specified
-	 * 3. If the tracer is oneshot style, acpi_gbl_trace_method_name should
-	 *    not be cleared by the trace stopper during the first match
-	 */
-	if (acpi_gbl_trace_method_object) {
-		return (TRUE);
-	}
-	if (name &&
-	    (acpi_gbl_trace_method_name &&
-	     strcmp(acpi_gbl_trace_method_name, name))) {
-		return (FALSE);
-	}
-	if ((acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) &&
-	    !acpi_gbl_trace_method_name) {
-		return (FALSE);
-	}
-
-	return (TRUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_get_trace_event_name
- *
- * PARAMETERS:  type            - Trace event type
- *
- * RETURN:      Trace event name.
- *
- * DESCRIPTION: Used to obtain the full trace event name.
- *
- ******************************************************************************/
-
-#ifdef ACPI_DEBUG_OUTPUT
-
-static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type)
-{
-	switch (type) {
-	case ACPI_TRACE_AML_METHOD:
-
-		return "Method";
-
-	case ACPI_TRACE_AML_OPCODE:
-
-		return "Opcode";
-
-	case ACPI_TRACE_AML_REGION:
-
-		return "Region";
-
-	default:
-
-		return "";
-	}
-}
-
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_trace_point
- *
- * PARAMETERS:  type                - Trace event type
- *              begin               - TRUE if before execution
- *              aml                 - Executed AML address
- *              pathname            - Object path
- *
- * RETURN:      None
- *
- * DESCRIPTION: Internal interpreter execution trace.
- *
- ******************************************************************************/
-
-void
-acpi_ex_trace_point(acpi_trace_event_type type,
-		    u8 begin, u8 *aml, char *pathname)
-{
-
-	ACPI_FUNCTION_NAME(ex_trace_point);
-
-	if (pathname) {
-		ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
-				  "%s %s [0x%p:%s] execution.\n",
-				  acpi_ex_get_trace_event_name(type),
-				  begin ? "Begin" : "End", aml, pathname));
-	} else {
-		ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
-				  "%s %s [0x%p] execution.\n",
-				  acpi_ex_get_trace_event_name(type),
-				  begin ? "Begin" : "End", aml));
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_start_trace_method
- *
- * PARAMETERS:  method_node         - Node of the method
- *              obj_desc            - The method object
- *              walk_state          - current state, NULL if not yet executing
- *                                    a method.
- *
- * RETURN:      None
- *
- * DESCRIPTION: Start control method execution trace
- *
- ******************************************************************************/
-
-void
-acpi_ex_start_trace_method(struct acpi_namespace_node *method_node,
-			   union acpi_operand_object *obj_desc,
-			   struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-	char *pathname = NULL;
-	u8 enabled = FALSE;
-
-	ACPI_FUNCTION_NAME(ex_start_trace_method);
-
-	if (method_node) {
-		pathname = acpi_ns_get_normalized_pathname(method_node, TRUE);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		goto exit;
-	}
-
-	enabled = acpi_ex_interpreter_trace_enabled(pathname);
-	if (enabled && !acpi_gbl_trace_method_object) {
-		acpi_gbl_trace_method_object = obj_desc;
-		acpi_gbl_original_dbg_level = acpi_dbg_level;
-		acpi_gbl_original_dbg_layer = acpi_dbg_layer;
-		acpi_dbg_level = ACPI_TRACE_LEVEL_ALL;
-		acpi_dbg_layer = ACPI_TRACE_LAYER_ALL;
-
-		if (acpi_gbl_trace_dbg_level) {
-			acpi_dbg_level = acpi_gbl_trace_dbg_level;
-		}
-		if (acpi_gbl_trace_dbg_layer) {
-			acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
-		}
-	}
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-
-exit:
-	if (enabled) {
-		ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, TRUE,
-				 obj_desc ? obj_desc->method.aml_start : NULL,
-				 pathname);
-	}
-	if (pathname) {
-		ACPI_FREE(pathname);
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_stop_trace_method
- *
- * PARAMETERS:  method_node         - Node of the method
- *              obj_desc            - The method object
- *              walk_state          - current state, NULL if not yet executing
- *                                    a method.
- *
- * RETURN:      None
- *
- * DESCRIPTION: Stop control method execution trace
- *
- ******************************************************************************/
-
-void
-acpi_ex_stop_trace_method(struct acpi_namespace_node *method_node,
-			  union acpi_operand_object *obj_desc,
-			  struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-	char *pathname = NULL;
-	u8 enabled;
-
-	ACPI_FUNCTION_NAME(ex_stop_trace_method);
-
-	if (method_node) {
-		pathname = acpi_ns_get_normalized_pathname(method_node, TRUE);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		goto exit_path;
-	}
-
-	enabled = acpi_ex_interpreter_trace_enabled(NULL);
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-
-	if (enabled) {
-		ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, FALSE,
-				 obj_desc ? obj_desc->method.aml_start : NULL,
-				 pathname);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		goto exit_path;
-	}
-
-	/* Check whether the tracer should be stopped */
-
-	if (acpi_gbl_trace_method_object == obj_desc) {
-
-		/* Disable further tracing if type is one-shot */
-
-		if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) {
-			acpi_gbl_trace_method_name = NULL;
-		}
-
-		acpi_dbg_level = acpi_gbl_original_dbg_level;
-		acpi_dbg_layer = acpi_gbl_original_dbg_layer;
-		acpi_gbl_trace_method_object = NULL;
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-
-exit_path:
-	if (pathname) {
-		ACPI_FREE(pathname);
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_start_trace_opcode
- *
- * PARAMETERS:  op                  - The parser opcode object
- *              walk_state          - current state, NULL if not yet executing
- *                                    a method.
- *
- * RETURN:      None
- *
- * DESCRIPTION: Start opcode execution trace
- *
- ******************************************************************************/
-
-void
-acpi_ex_start_trace_opcode(union acpi_parse_object *op,
-			   struct acpi_walk_state *walk_state)
-{
-
-	ACPI_FUNCTION_NAME(ex_start_trace_opcode);
-
-	if (acpi_ex_interpreter_trace_enabled(NULL) &&
-	    (acpi_gbl_trace_flags & ACPI_TRACE_OPCODE)) {
-		ACPI_TRACE_POINT(ACPI_TRACE_AML_OPCODE, TRUE,
-				 op->common.aml, op->common.aml_op_name);
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_stop_trace_opcode
- *
- * PARAMETERS:  op                  - The parser opcode object
- *              walk_state          - current state, NULL if not yet executing
- *                                    a method.
- *
- * RETURN:      None
- *
- * DESCRIPTION: Stop opcode execution trace
- *
- ******************************************************************************/
-
-void
-acpi_ex_stop_trace_opcode(union acpi_parse_object *op,
-			  struct acpi_walk_state *walk_state)
-{
-
-	ACPI_FUNCTION_NAME(ex_stop_trace_opcode);
-
-	if (acpi_ex_interpreter_trace_enabled(NULL) &&
-	    (acpi_gbl_trace_flags & ACPI_TRACE_OPCODE)) {
-		ACPI_TRACE_POINT(ACPI_TRACE_AML_OPCODE, FALSE,
-				 op->common.aml, op->common.aml_op_name);
-	}
-}

+ 4 - 2
drivers/acpi/acpica/exdump.c

@@ -508,7 +508,8 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
 			if (next) {
 			if (next) {
 				acpi_os_printf("(%s %2.2X)",
 				acpi_os_printf("(%s %2.2X)",
 					       acpi_ut_get_object_type_name
 					       acpi_ut_get_object_type_name
-					       (next), next->common.type);
+					       (next),
+					       next->address_space.space_id);
 
 
 				while (next->address_space.next) {
 				while (next->address_space.next) {
 					if ((next->common.type ==
 					if ((next->common.type ==
@@ -520,7 +521,8 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
 					acpi_os_printf("->%p(%s %2.2X)", next,
 					acpi_os_printf("->%p(%s %2.2X)", next,
 						       acpi_ut_get_object_type_name
 						       acpi_ut_get_object_type_name
 						       (next),
 						       (next),
-						       next->common.type);
+						       next->address_space.
+						       space_id);
 
 
 					if ((next == start) || (next == data)) {
 					if ((next == start) || (next == data)) {
 						acpi_os_printf
 						acpi_os_printf

+ 42 - 32
drivers/acpi/acpica/exfield.c

@@ -167,10 +167,11 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
 		    || obj_desc->field.region_obj->region.space_id ==
 		    || obj_desc->field.region_obj->region.space_id ==
 		    ACPI_ADR_SPACE_IPMI)) {
 		    ACPI_ADR_SPACE_IPMI)) {
 		/*
 		/*
-		 * This is an SMBus, GSBus or IPMI read. We must create a buffer to hold
-		 * the data and then directly access the region handler.
+		 * This is an SMBus, GSBus or IPMI read. We must create a buffer to
+		 * hold the data and then directly access the region handler.
 		 *
 		 *
-		 * Note: SMBus and GSBus protocol value is passed in upper 16-bits of Function
+		 * Note: SMBus and GSBus protocol value is passed in upper 16-bits
+		 * of Function
 		 */
 		 */
 		if (obj_desc->field.region_obj->region.space_id ==
 		if (obj_desc->field.region_obj->region.space_id ==
 		    ACPI_ADR_SPACE_SMBUS) {
 		    ACPI_ADR_SPACE_SMBUS) {
@@ -180,17 +181,17 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
 		} else if (obj_desc->field.region_obj->region.space_id ==
 		} else if (obj_desc->field.region_obj->region.space_id ==
 			   ACPI_ADR_SPACE_GSBUS) {
 			   ACPI_ADR_SPACE_GSBUS) {
 			accessor_type = obj_desc->field.attribute;
 			accessor_type = obj_desc->field.attribute;
-			length = acpi_ex_get_serial_access_length(accessor_type,
-								  obj_desc->
-								  field.
-								  access_length);
+			length =
+			    acpi_ex_get_serial_access_length(accessor_type,
+							     obj_desc->field.
+							     access_length);
 
 
 			/*
 			/*
 			 * Add additional 2 bytes for the generic_serial_bus data buffer:
 			 * Add additional 2 bytes for the generic_serial_bus data buffer:
 			 *
 			 *
-			 *     Status;      (Byte 0 of the data buffer)
-			 *     Length;      (Byte 1 of the data buffer)
-			 *     Data[x-1];   (Bytes 2-x of the arbitrary length data buffer)
+			 *     Status;    (Byte 0 of the data buffer)
+			 *     Length;    (Byte 1 of the data buffer)
+			 *     Data[x-1]: (Bytes 2-x of the arbitrary length data buffer)
 			 */
 			 */
 			length += 2;
 			length += 2;
 			function = ACPI_READ | (accessor_type << 16);
 			function = ACPI_READ | (accessor_type << 16);
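The comment reflowed just above spells out why the code adds two bytes to the generic_serial_bus transfer length: a Status byte and a Length byte precede the variable-length data. A sketch of the implied layout; the struct name is illustrative, since ACPICA actually handles this as a raw byte buffer:

/* Sketch: layout behind the "Status / Length / Data[x-1]" comment */
struct gsbus_transfer_buffer {
	u8 status;	/* Byte 0: status returned by the handler */
	u8 length;	/* Byte 1: number of valid data bytes that follow */
	u8 data[];	/* Bytes 2..x: arbitrary-length payload */
};
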
@@ -216,6 +217,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
 							     buffer_desc->
 							     buffer_desc->
 							     buffer.pointer),
 							     buffer.pointer),
 					       function);
 					       function);
+
 		acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
 		acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
 		goto exit;
 		goto exit;
 	}
 	}
@@ -232,6 +234,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
 	 */
 	 */
 	length =
 	length =
 	    (acpi_size) ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
 	    (acpi_size) ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
+
 	if (length > acpi_gbl_integer_byte_width) {
 	if (length > acpi_gbl_integer_byte_width) {
 
 
 		/* Field is too large for an Integer, create a Buffer instead */
 		/* Field is too large for an Integer, create a Buffer instead */
@@ -273,8 +276,10 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
 
 
 		/* Perform the write */
 		/* Perform the write */
 
 
-		status = acpi_ex_access_region(obj_desc, 0,
-					       (u64 *)buffer, ACPI_READ);
+		status =
+		    acpi_ex_access_region(obj_desc, 0, (u64 *)buffer,
+					  ACPI_READ);
+
 		acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
 		acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
 		if (ACPI_FAILURE(status)) {
 		if (ACPI_FAILURE(status)) {
 			acpi_ut_remove_reference(buffer_desc);
 			acpi_ut_remove_reference(buffer_desc);
@@ -366,19 +371,22 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
 		    || obj_desc->field.region_obj->region.space_id ==
 		    || obj_desc->field.region_obj->region.space_id ==
 		    ACPI_ADR_SPACE_IPMI)) {
 		    ACPI_ADR_SPACE_IPMI)) {
 		/*
 		/*
-		 * This is an SMBus, GSBus or IPMI write. We will bypass the entire field
-		 * mechanism and handoff the buffer directly to the handler. For
-		 * these address spaces, the buffer is bi-directional; on a write,
-		 * return data is returned in the same buffer.
+		 * This is an SMBus, GSBus or IPMI write. We will bypass the entire
+		 * field mechanism and handoff the buffer directly to the handler.
+		 * For these address spaces, the buffer is bi-directional; on a
+		 * write, return data is returned in the same buffer.
 		 *
 		 *
 		 * Source must be a buffer of sufficient size:
 		 * Source must be a buffer of sufficient size:
-		 * ACPI_SMBUS_BUFFER_SIZE, ACPI_GSBUS_BUFFER_SIZE, or ACPI_IPMI_BUFFER_SIZE.
+		 * ACPI_SMBUS_BUFFER_SIZE, ACPI_GSBUS_BUFFER_SIZE, or
+		 * ACPI_IPMI_BUFFER_SIZE.
 		 *
 		 *
-		 * Note: SMBus and GSBus protocol type is passed in upper 16-bits of Function
+		 * Note: SMBus and GSBus protocol type is passed in upper 16-bits
+		 * of Function
 		 */
 		 */
 		if (source_desc->common.type != ACPI_TYPE_BUFFER) {
 		if (source_desc->common.type != ACPI_TYPE_BUFFER) {
 			ACPI_ERROR((AE_INFO,
 			ACPI_ERROR((AE_INFO,
-				    "SMBus/IPMI/GenericSerialBus write requires Buffer, found type %s",
+				    "SMBus/IPMI/GenericSerialBus write requires "
+				    "Buffer, found type %s",
 				    acpi_ut_get_object_type_name(source_desc)));
 				    acpi_ut_get_object_type_name(source_desc)));
 
 
 			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
 			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -392,17 +400,17 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
 		} else if (obj_desc->field.region_obj->region.space_id ==
 		} else if (obj_desc->field.region_obj->region.space_id ==
 			   ACPI_ADR_SPACE_GSBUS) {
 			   ACPI_ADR_SPACE_GSBUS) {
 			accessor_type = obj_desc->field.attribute;
 			accessor_type = obj_desc->field.attribute;
-			length = acpi_ex_get_serial_access_length(accessor_type,
-								  obj_desc->
-								  field.
-								  access_length);
+			length =
+			    acpi_ex_get_serial_access_length(accessor_type,
+							     obj_desc->field.
+							     access_length);
 
 
 			/*
 			/*
 			 * Add additional 2 bytes for the generic_serial_bus data buffer:
 			 * Add additional 2 bytes for the generic_serial_bus data buffer:
 			 *
 			 *
-			 *     Status;      (Byte 0 of the data buffer)
-			 *     Length;      (Byte 1 of the data buffer)
-			 *     Data[x-1];   (Bytes 2-x of the arbitrary length data buffer)
+			 *     Status;    (Byte 0 of the data buffer)
+			 *     Length;    (Byte 1 of the data buffer)
+			 *     Data[x-1]: (Bytes 2-x of the arbitrary length data buffer)
 			 */
 			 */
 			length += 2;
 			length += 2;
 			function = ACPI_WRITE | (accessor_type << 16);
 			function = ACPI_WRITE | (accessor_type << 16);
@@ -414,7 +422,8 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
 
 
 		if (source_desc->buffer.length < length) {
 		if (source_desc->buffer.length < length) {
 			ACPI_ERROR((AE_INFO,
 			ACPI_ERROR((AE_INFO,
-				    "SMBus/IPMI/GenericSerialBus write requires Buffer of length %u, found length %u",
+				    "SMBus/IPMI/GenericSerialBus write requires "
+				    "Buffer of length %u, found length %u",
 				    length, source_desc->buffer.length));
 				    length, source_desc->buffer.length));
 
 
 			return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
 			return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
@@ -438,8 +447,8 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
 		 * Perform the write (returns status and perhaps data in the
 		 * Perform the write (returns status and perhaps data in the
 		 * same buffer)
 		 * same buffer)
 		 */
 		 */
-		status = acpi_ex_access_region(obj_desc, 0,
-					       (u64 *) buffer, function);
+		status =
+		    acpi_ex_access_region(obj_desc, 0, (u64 *)buffer, function);
 		acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
 		acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
 
 
 		*result_desc = buffer_desc;
 		*result_desc = buffer_desc;
@@ -460,7 +469,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
 		}
 		}
 
 
 		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
 		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-				  "GPIO FieldWrite [FROM]: (%s:%X), Val %.8X  [TO]:  Pin %u Bits %u\n",
+				  "GPIO FieldWrite [FROM]: (%s:%X), Val %.8X  [TO]: Pin %u Bits %u\n",
 				  acpi_ut_get_type_name(source_desc->common.
 				  acpi_ut_get_type_name(source_desc->common.
 							type),
 							type),
 				  source_desc->common.type,
 				  source_desc->common.type,
@@ -476,8 +485,9 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
 
 
 		/* Perform the write */
 		/* Perform the write */
 
 
-		status = acpi_ex_access_region(obj_desc, 0,
-					       (u64 *)buffer, ACPI_WRITE);
+		status =
+		    acpi_ex_access_region(obj_desc, 0, (u64 *)buffer,
+					  ACPI_WRITE);
 		acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
 		acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
 		return_ACPI_STATUS(status);
 		return_ACPI_STATUS(status);
 	}
 	}

+ 20 - 15
drivers/acpi/acpica/exfldio.c

@@ -180,7 +180,8 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
 			 * byte, and a field with Dword access specified.
 			 * byte, and a field with Dword access specified.
 			 */
 			 */
 			ACPI_ERROR((AE_INFO,
 			ACPI_ERROR((AE_INFO,
-				    "Field [%4.4s] access width (%u bytes) too large for region [%4.4s] (length %u)",
+				    "Field [%4.4s] access width (%u bytes) "
+				    "too large for region [%4.4s] (length %u)",
 				    acpi_ut_get_node_name(obj_desc->
 				    acpi_ut_get_node_name(obj_desc->
 							  common_field.node),
 							  common_field.node),
 				    obj_desc->common_field.access_byte_width,
 				    obj_desc->common_field.access_byte_width,
@@ -194,7 +195,8 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
 		 * exceeds region length, indicate an error
 		 * exceeds region length, indicate an error
 		 */
 		 */
 		ACPI_ERROR((AE_INFO,
 		ACPI_ERROR((AE_INFO,
-			    "Field [%4.4s] Base+Offset+Width %u+%u+%u is beyond end of region [%4.4s] (length %u)",
+			    "Field [%4.4s] Base+Offset+Width %u+%u+%u "
+			    "is beyond end of region [%4.4s] (length %u)",
 			    acpi_ut_get_node_name(obj_desc->common_field.node),
 			    acpi_ut_get_node_name(obj_desc->common_field.node),
 			    obj_desc->common_field.base_byte_offset,
 			    obj_desc->common_field.base_byte_offset,
 			    field_datum_byte_offset,
 			    field_datum_byte_offset,
@@ -638,15 +640,15 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
 
 
 			ACPI_ERROR((AE_INFO,
 			ACPI_ERROR((AE_INFO,
 				    "Unknown UpdateRule value: 0x%X",
 				    "Unknown UpdateRule value: 0x%X",
-				    (obj_desc->common_field.
-				     field_flags &
+				    (obj_desc->common_field.field_flags &
 				     AML_FIELD_UPDATE_RULE_MASK)));
 				     AML_FIELD_UPDATE_RULE_MASK)));
 			return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
 			return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
 		}
 		}
 	}
 	}
 
 
 	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
 	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-			  "Mask %8.8X%8.8X, DatumOffset %X, Width %X, Value %8.8X%8.8X, MergedValue %8.8X%8.8X\n",
+			  "Mask %8.8X%8.8X, DatumOffset %X, Width %X, "
+			  "Value %8.8X%8.8X, MergedValue %8.8X%8.8X\n",
 			  ACPI_FORMAT_UINT64(mask),
 			  ACPI_FORMAT_UINT64(mask),
 			  field_datum_byte_offset,
 			  field_datum_byte_offset,
 			  obj_desc->common_field.access_byte_width,
 			  obj_desc->common_field.access_byte_width,
@@ -655,8 +657,9 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
 
 
 	/* Write the merged value */
 	/* Write the merged value */
 
 
-	status = acpi_ex_field_datum_io(obj_desc, field_datum_byte_offset,
-					&merged_value, ACPI_WRITE);
+	status =
+	    acpi_ex_field_datum_io(obj_desc, field_datum_byte_offset,
+				   &merged_value, ACPI_WRITE);
 
 
 	return_ACPI_STATUS(status);
 	return_ACPI_STATUS(status);
 }
 }
@@ -764,8 +767,9 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
 		/* Get next input datum from the field */
 		/* Get next input datum from the field */
 
 
 		field_offset += obj_desc->common_field.access_byte_width;
 		field_offset += obj_desc->common_field.access_byte_width;
-		status = acpi_ex_field_datum_io(obj_desc, field_offset,
-						&raw_datum, ACPI_READ);
+		status =
+		    acpi_ex_field_datum_io(obj_desc, field_offset, &raw_datum,
+					   ACPI_READ);
 		if (ACPI_FAILURE(status)) {
 		if (ACPI_FAILURE(status)) {
 			return_ACPI_STATUS(status);
 			return_ACPI_STATUS(status);
 		}
 		}
@@ -858,6 +862,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
 	new_buffer = NULL;
 	required_length =
 	    ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length);
+
 	/*
 	 * We must have a buffer that is at least as long as the field
 	 * we are writing to. This is because individual fields are
@@ -932,9 +937,9 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
 		/* Write merged datum to the target field */
 
 		merged_datum &= mask;
-		status = acpi_ex_write_with_update_rule(obj_desc, mask,
-							merged_datum,
-							field_offset);
+		status =
+		    acpi_ex_write_with_update_rule(obj_desc, mask, merged_datum,
+						   field_offset);
 		if (ACPI_FAILURE(status)) {
 			goto exit;
 		}
@@ -990,9 +995,9 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
 	/* Write the last datum to the field */
 
 	merged_datum &= mask;
-	status = acpi_ex_write_with_update_rule(obj_desc,
-						mask, merged_datum,
-						field_offset);
+	status =
+	    acpi_ex_write_with_update_rule(obj_desc, mask, merged_datum,
+					   field_offset);
 
 exit:
 	/* Free temporary buffer if we used one */
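
Note on the message-string changes in the hunks above: they only rewrap long format strings across source lines. Adjacent C string literals are concatenated by the compiler, so the text emitted at run time is unchanged. A minimal standalone sketch (hypothetical printf() calls and sample values, not ACPICA code) showing that the two forms print the same message:

#include <stdio.h>

int main(void)
{
	/* Original form: one long literal */
	printf("Field [%4.4s] access width (%u bytes) too large for region [%4.4s] (length %u)\n",
	       "FLD0", 4u, "RGN0", 2u);

	/* Rewrapped form: adjacent literals are merged at compile time,
	 * so the output is byte-for-byte identical.
	 */
	printf("Field [%4.4s] access width (%u bytes) "
	       "too large for region [%4.4s] (length %u)\n",
	       "FLD0", 4u, "RGN0", 2u);

	return 0;
}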

+ 41 - 8
drivers/acpi/acpica/exmisc.c

@@ -98,9 +98,9 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
 
 		default:
 
-			ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X",
+			ACPI_ERROR((AE_INFO, "Invalid Reference Class 0x%2.2X",
 				    obj_desc->reference.class));
-			return_ACPI_STATUS(AE_AML_INTERNAL);
+			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
 		}
 		break;
 
@@ -247,6 +247,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
 	union acpi_operand_object *local_operand1 = operand1;
 	union acpi_operand_object *return_desc;
 	char *new_buf;
+	const char *type_string;
 	acpi_status status;
 
 	ACPI_FUNCTION_TRACE(ex_do_concatenate);
@@ -266,9 +267,41 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
 		break;
 
 	case ACPI_TYPE_STRING:
+		/*
+		 * Per the ACPI spec, Concatenate only supports int/str/buf.
+		 * However, we support all objects here as an extension.
+		 * This improves the usefulness of the Printf() macro.
+		 * 12/2015.
+		 */
+		switch (operand1->common.type) {
+		case ACPI_TYPE_INTEGER:
+		case ACPI_TYPE_STRING:
+		case ACPI_TYPE_BUFFER:
+
+			status =
+			    acpi_ex_convert_to_string(operand1, &local_operand1,
+						      ACPI_IMPLICIT_CONVERT_HEX);
+			break;
+
+		default:
+			/*
+			 * Just emit a string containing the object type.
+			 */
+			type_string =
+			    acpi_ut_get_type_name(operand1->common.type);
+
+			local_operand1 = acpi_ut_create_string_object(((acpi_size) strlen(type_string) + 9));	/* 9 For "[Object]" */
+			if (!local_operand1) {
+				status = AE_NO_MEMORY;
+				goto cleanup;
+			}
 
-		status = acpi_ex_convert_to_string(operand1, &local_operand1,
-						   ACPI_IMPLICIT_CONVERT_HEX);
+			strcpy(local_operand1->string.pointer, "[");
+			strcat(local_operand1->string.pointer, type_string);
+			strcat(local_operand1->string.pointer, " Object]");
+			status = AE_OK;
+			break;
+		}
 		break;
 
 	case ACPI_TYPE_BUFFER:
@@ -347,8 +380,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
 		/* Concatenate the strings */
 
 		strcpy(new_buf, operand0->string.pointer);
-		strcpy(new_buf + operand0->string.length,
-		       local_operand1->string.pointer);
+		strcat(new_buf, local_operand1->string.pointer);
 		break;
 
 	case ACPI_TYPE_BUFFER:
@@ -591,8 +623,9 @@ acpi_ex_do_logical_op(u16 opcode,
 
 	case ACPI_TYPE_STRING:
 
-		status = acpi_ex_convert_to_string(operand1, &local_operand1,
-						   ACPI_IMPLICIT_CONVERT_HEX);
+		status =
+		    acpi_ex_convert_to_string(operand1, &local_operand1,
+					      ACPI_IMPLICIT_CONVERT_HEX);
 		break;
 
 	case ACPI_TYPE_BUFFER:
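
The Concatenate change above adds a fallback for operand types other than Integer/String/Buffer: the second operand is rendered as a "[<TypeName> Object]" string instead of failing. A minimal userspace sketch of that formatting (a hypothetical helper using plain malloc(); the real code sizes the buffer as strlen(type_string) + 9, i.e. "[" plus " Object]", with the terminator supplied by the ACPICA string-object allocation):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper mirroring the fallback path in the hunk above:
 * build "[<Type> Object]" from a type name. The "+ 9" covers "[" and
 * " Object]"; the "+ 1" here is the NUL terminator.
 */
static char *type_to_placeholder(const char *type_string)
{
	char *buf = malloc(strlen(type_string) + 9 + 1);

	if (!buf)
		return NULL;

	strcpy(buf, "[");
	strcat(buf, type_string);
	strcat(buf, " Object]");
	return buf;
}

int main(void)
{
	char *s = type_to_placeholder("Mutex");	/* e.g. concatenating with a Mutex object */

	if (s) {
		printf("%s\n", s);	/* prints: [Mutex Object] */
		free(s);
	}

	return 0;
}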

+ 60 - 22
drivers/acpi/acpica/exmutex.c

@@ -185,8 +185,9 @@ acpi_ex_acquire_mutex_object(u16 timeout,
 	if (obj_desc == acpi_gbl_global_lock_mutex) {
 		status = acpi_ev_acquire_global_lock(timeout);
 	} else {
-		status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
-						   timeout);
+		status =
+		    acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
+					      timeout);
 	}
 
 	if (ACPI_FAILURE(status)) {
@@ -243,20 +244,30 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
 	}
 
 	/*
-	 * Current sync level must be less than or equal to the sync level of the
-	 * mutex. This mechanism provides some deadlock prevention
+	 * Current sync level must be less than or equal to the sync level
+	 * of the mutex. This mechanism provides some deadlock prevention.
 	 */
 	if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
 		ACPI_ERROR((AE_INFO,
-			    "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%u)",
+			    "Cannot acquire Mutex [%4.4s], "
+			    "current SyncLevel is too large (%u)",
 			    acpi_ut_get_node_name(obj_desc->mutex.node),
 			    walk_state->thread->current_sync_level));
 		return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
 	}
 
-	status = acpi_ex_acquire_mutex_object((u16) time_desc->integer.value,
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+			  "Acquiring: Mutex SyncLevel %u, Thread SyncLevel %u, "
+			  "Depth %u TID %p\n",
+			  obj_desc->mutex.sync_level,
+			  walk_state->thread->current_sync_level,
+			  obj_desc->mutex.acquisition_depth,
+			  walk_state->thread));
+
+	status = acpi_ex_acquire_mutex_object((u16)time_desc->integer.value,
 					      obj_desc,
 					      walk_state->thread->thread_id);
+
 	if (ACPI_SUCCESS(status) && obj_desc->mutex.acquisition_depth == 1) {
 
 		/* Save Thread object, original/current sync levels */
@@ -272,6 +283,12 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
 		acpi_ex_link_mutex(obj_desc, walk_state->thread);
 	}
 
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+			  "Acquired: Mutex SyncLevel %u, Thread SyncLevel %u, Depth %u\n",
+			  obj_desc->mutex.sync_level,
+			  walk_state->thread->current_sync_level,
+			  obj_desc->mutex.acquisition_depth));
+
 	return_ACPI_STATUS(status);
 }
 
@@ -356,9 +373,9 @@ acpi_status
 acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
 		      struct acpi_walk_state *walk_state)
 {
-	acpi_status status = AE_OK;
 	u8 previous_sync_level;
 	struct acpi_thread_state *owner_thread;
+	acpi_status status = AE_OK;
 
 	ACPI_FUNCTION_TRACE(ex_release_mutex);
 
@@ -409,7 +426,8 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
 	 */
 	if (obj_desc->mutex.sync_level != owner_thread->current_sync_level) {
 		ACPI_ERROR((AE_INFO,
-			    "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %u current %u",
+			    "Cannot release Mutex [%4.4s], SyncLevel mismatch: "
+			    "mutex %u current %u",
 			    acpi_ut_get_node_name(obj_desc->mutex.node),
 			    obj_desc->mutex.sync_level,
 			    walk_state->thread->current_sync_level));
@@ -424,6 +442,15 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
 	previous_sync_level =
 	    owner_thread->acquired_mutex_list->mutex.original_sync_level;
 
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+			  "Releasing: Object SyncLevel %u, Thread SyncLevel %u, "
+			  "Prev SyncLevel %u, Depth %u TID %p\n",
+			  obj_desc->mutex.sync_level,
+			  walk_state->thread->current_sync_level,
+			  previous_sync_level,
+			  obj_desc->mutex.acquisition_depth,
+			  walk_state->thread));
+
 	status = acpi_ex_release_mutex_object(obj_desc);
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
@@ -436,6 +463,14 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
 		owner_thread->current_sync_level = previous_sync_level;
 	}
 
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+			  "Released: Object SyncLevel %u, Thread SyncLevel, %u, "
+			  "Prev SyncLevel %u, Depth %u\n",
+			  obj_desc->mutex.sync_level,
+			  walk_state->thread->current_sync_level,
+			  previous_sync_level,
+			  obj_desc->mutex.acquisition_depth));
+
 	return_ACPI_STATUS(status);
 }
 
@@ -462,21 +497,17 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
 	union acpi_operand_object *next = thread->acquired_mutex_list;
 	union acpi_operand_object *obj_desc;
 
-	ACPI_FUNCTION_NAME(ex_release_all_mutexes);
+	ACPI_FUNCTION_TRACE(ex_release_all_mutexes);
 
 	/* Traverse the list of owned mutexes, releasing each one */
 
 	while (next) {
 		obj_desc = next;
-		next = obj_desc->mutex.next;
-
-		obj_desc->mutex.prev = NULL;
-		obj_desc->mutex.next = NULL;
-		obj_desc->mutex.acquisition_depth = 0;
-
 		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Force-releasing held mutex: %p\n",
-				  obj_desc));
+				  "Mutex [%4.4s] force-release, SyncLevel %u Depth %u\n",
+				  obj_desc->mutex.node->name.ascii,
+				  obj_desc->mutex.sync_level,
+				  obj_desc->mutex.acquisition_depth));
 
 		/* Release the mutex, special case for Global Lock */
 
@@ -489,14 +520,21 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
 			acpi_os_release_mutex(obj_desc->mutex.os_mutex);
 		}
 
-		/* Mark mutex unowned */
-
-		obj_desc->mutex.owner_thread = NULL;
-		obj_desc->mutex.thread_id = 0;
-
 		/* Update Thread sync_level (Last mutex is the important one) */
 
 		thread->current_sync_level =
 		    obj_desc->mutex.original_sync_level;
+
+		/* Mark mutex unowned */
+
+		next = obj_desc->mutex.next;
+
+		obj_desc->mutex.prev = NULL;
+		obj_desc->mutex.next = NULL;
+		obj_desc->mutex.acquisition_depth = 0;
+		obj_desc->mutex.owner_thread = NULL;
+		obj_desc->mutex.thread_id = 0;
 	}
+
+	return_VOID;
 }
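
The acpi_ex_release_all_mutexes() rework above logs each held mutex (name, SyncLevel, depth) while its fields are still valid, and only afterwards caches mutex.next and clears the node's links and ownership. A simplified sketch of that list-walk ordering (generic types, not the ACPICA structures):

#include <stdio.h>
#include <stddef.h>

struct held_mutex {
	const char *name;
	unsigned int sync_level;
	unsigned int depth;
	struct held_mutex *next;
};

/* Force-release every mutex on a thread's acquired list: log the entry
 * first, then cache 'next' before resetting the node, so the traversal
 * never reads a pointer that has already been cleared.
 */
static void release_all(struct held_mutex *list)
{
	struct held_mutex *node = list;
	struct held_mutex *next;

	while (node) {
		printf("Mutex [%s] force-release, SyncLevel %u Depth %u\n",
		       node->name, node->sync_level, node->depth);

		/* ... release the underlying OS mutex here ... */

		next = node->next;	/* save before clearing links */
		node->next = NULL;
		node->depth = 0;
		node = next;
	}
}

int main(void)
{
	struct held_mutex second = { "MTX1", 2, 1, NULL };
	struct held_mutex first = { "MTX0", 1, 1, &second };

	release_all(&first);
	return 0;
}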

+ 2 - 2
drivers/acpi/acpica/exnames.c

@@ -164,8 +164,8 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
 	ACPI_FUNCTION_TRACE(ex_name_segment);
 
 	/*
-	 * If first character is a digit, then we know that we aren't looking at a
-	 * valid name segment
+	 * If first character is a digit, then we know that we aren't looking
+	 * at a valid name segment
 	 */
 	char_buf[0] = *aml_address;
 

Some files were not shown because too many files changed in this diff