
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	net/sched/act_police.c
	net/sched/sch_drr.c
	net/sched/sch_hfsc.c
	net/sched/sch_prio.c
	net/sched/sch_red.c
	net/sched/sch_tbf.c

In net-next the drop methods of the packet schedulers got removed, so
the bug fixes to them in 'net' are irrelevant.

A packet action unload crash fix conflicts with the addition of the
new firstuse timestamp.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller, commit 1578b0a5e9
100 changed files with 637 additions and 788 deletions
  1. Documentation/DocBook/device-drivers.tmpl (+32 -4)
  2. Documentation/arm64/silicon-errata.txt (+1 -0)
  3. Documentation/devicetree/bindings/display/imx/ldb.txt (+1 -0)
  4. Documentation/filesystems/devpts.txt (+15 -130)
  5. Documentation/kdump/gdbmacros.txt (+82 -11)
  6. Documentation/security/keys.txt (+4 -1)
  7. MAINTAINERS (+3 -0)
  8. Makefile (+1 -1)
  9. arch/arc/Kconfig (+4 -27)
  10. arch/arc/Makefile (+1 -1)
  11. arch/arc/boot/dts/abilis_tb100.dtsi (+0 -2)
  12. arch/arc/boot/dts/abilis_tb101.dtsi (+0 -2)
  13. arch/arc/boot/dts/axc001.dtsi (+0 -1)
  14. arch/arc/boot/dts/axc003.dtsi (+0 -1)
  15. arch/arc/boot/dts/axc003_idu.dtsi (+0 -1)
  16. arch/arc/boot/dts/eznps.dts (+0 -1)
  17. arch/arc/boot/dts/nsim_700.dts (+0 -1)
  18. arch/arc/boot/dts/nsimosci.dts (+0 -1)
  19. arch/arc/boot/dts/nsimosci_hs.dts (+0 -1)
  20. arch/arc/boot/dts/nsimosci_hs_idu.dts (+0 -1)
  21. arch/arc/boot/dts/skeleton.dtsi (+0 -1)
  22. arch/arc/boot/dts/skeleton_hs.dtsi (+0 -1)
  23. arch/arc/boot/dts/skeleton_hs_idu.dtsi (+0 -1)
  24. arch/arc/boot/dts/vdk_axc003.dtsi (+0 -1)
  25. arch/arc/boot/dts/vdk_axc003_idu.dtsi (+0 -1)
  26. arch/arc/include/asm/atomic.h (+4 -41)
  27. arch/arc/include/asm/entry-compact.h (+2 -2)
  28. arch/arc/include/asm/mmu_context.h (+1 -1)
  29. arch/arc/include/asm/pgtable.h (+1 -1)
  30. arch/arc/include/asm/processor.h (+1 -1)
  31. arch/arc/include/asm/smp.h (+1 -1)
  32. arch/arc/include/asm/spinlock.h (+0 -292)
  33. arch/arc/include/asm/thread_info.h (+1 -1)
  34. arch/arc/include/asm/uaccess.h (+1 -1)
  35. arch/arc/include/uapi/asm/swab.h (+1 -1)
  36. arch/arc/kernel/entry-compact.S (+2 -16)
  37. arch/arc/kernel/intc-compact.c (+2 -4)
  38. arch/arc/kernel/perf_event.c (+1 -1)
  39. arch/arc/kernel/setup.c (+1 -1)
  40. arch/arc/kernel/signal.c (+1 -1)
  41. arch/arc/kernel/troubleshoot.c (+1 -1)
  42. arch/arc/mm/cache.c (+3 -3)
  43. arch/arc/mm/dma.c (+1 -1)
  44. arch/arm/kernel/ptrace.c (+1 -1)
  45. arch/arm/mach-vexpress/spc.c (+1 -1)
  46. arch/arm64/Kconfig (+21 -0)
  47. arch/arm64/Kconfig.debug (+13 -12)
  48. arch/arm64/Makefile (+3 -1)
  49. arch/arm64/include/asm/elf.h (+2 -2)
  50. arch/arm64/include/asm/memory.h (+2 -1)
  51. arch/arm64/include/asm/page.h (+2 -10)
  52. arch/arm64/include/asm/uaccess.h (+0 -13)
  53. arch/arm64/include/asm/unistd.h (+1 -1)
  54. arch/arm64/include/asm/unistd32.h (+8 -0)
  55. arch/arm64/kernel/cpuinfo.c (+7 -1)
  56. arch/arm64/kernel/traps.c (+3 -2)
  57. arch/arm64/kvm/hyp/vgic-v3-sr.c (+21 -15)
  58. arch/arm64/kvm/sys_regs.c (+12 -1)
  59. arch/arm64/mm/dump.c (+7 -1)
  60. arch/arm64/mm/hugetlbpage.c (+14 -0)
  61. arch/parisc/include/asm/traps.h (+2 -0)
  62. arch/parisc/kernel/processor.c (+3 -2)
  63. arch/parisc/kernel/time.c (+0 -5)
  64. arch/parisc/kernel/unaligned.c (+10 -3)
  65. arch/parisc/kernel/unwind.c (+14 -8)
  66. arch/powerpc/include/asm/reg.h (+3 -3)
  67. arch/powerpc/kernel/prom_init.c (+1 -0)
  68. arch/powerpc/mm/hash_utils_64.c (+20 -2)
  69. arch/powerpc/mm/pgtable-book3s64.c (+1 -4)
  70. arch/powerpc/mm/pgtable-radix.c (+10 -13)
  71. arch/powerpc/platforms/512x/clock-commonclk.c (+1 -1)
  72. arch/powerpc/platforms/cell/spufs/coredump.c (+1 -1)
  73. arch/powerpc/platforms/pseries/eeh_pseries.c (+33 -16)
  74. arch/x86/kvm/cpuid.c (+12 -10)
  75. arch/x86/kvm/mmu.c (+4 -4)
  76. arch/x86/kvm/x86.c (+11 -1)
  77. drivers/acpi/acpi_processor.c (+0 -9)
  78. drivers/acpi/acpi_video.c (+6 -3)
  79. drivers/acpi/acpica/hwregs.c (+9 -14)
  80. drivers/acpi/bus.c (+1 -1)
  81. drivers/acpi/ec.c (+22 -7)
  82. drivers/acpi/internal.h (+1 -1)
  83. drivers/acpi/processor_throttling.c (+9 -0)
  84. drivers/clk/Kconfig (+1 -0)
  85. drivers/clk/microchip/clk-pic32mzda.c (+5 -5)
  86. drivers/cpufreq/cpufreq.c (+1 -1)
  87. drivers/cpufreq/intel_pstate.c (+10 -4)
  88. drivers/dma-buf/dma-buf.c (+4 -3)
  89. drivers/dma-buf/reservation.c (+68 -4)
  90. drivers/edac/edac_mc.c (+2 -1)
  91. drivers/edac/sb_edac.c (+22 -13)
  92. drivers/gpu/drm/amd/amdgpu/amdgpu.h (+3 -1)
  93. drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c (+12 -0)
  94. drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (+9 -3)
  95. drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c (+7 -2)
  96. drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c (+17 -7)
  97. drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c (+1 -0)
  98. drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c (+1 -0)
  99. drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c (+10 -9)
  100. drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (+3 -3)

+ 32 - 4
Documentation/DocBook/device-drivers.tmpl

@@ -128,16 +128,44 @@ X!Edrivers/base/interface.c
 !Edrivers/base/platform.c
 !Edrivers/base/bus.c
      </sect1>
-     <sect1><title>Device Drivers DMA Management</title>
+     <sect1>
+       <title>Buffer Sharing and Synchronization</title>
+       <para>
+         The dma-buf subsystem provides the framework for sharing buffers
+         for hardware (DMA) access across multiple device drivers and
+         subsystems, and for synchronizing asynchronous hardware access.
+       </para>
+       <para>
+         This is used, for example, by drm "prime" multi-GPU support, but
+         is of course not limited to GPU use cases.
+       </para>
+       <para>
+         The three main components of this are: (1) dma-buf, representing
+         a sg_table and exposed to userspace as a file descriptor to allow
+         passing between devices, (2) fence, which provides a mechanism
+         to signal when one device as finished access, and (3) reservation,
+         which manages the shared or exclusive fence(s) associated with
+         the buffer.
+       </para>
+       <sect2><title>dma-buf</title>
 !Edrivers/dma-buf/dma-buf.c
+!Iinclude/linux/dma-buf.h
+       </sect2>
+       <sect2><title>reservation</title>
+!Pdrivers/dma-buf/reservation.c Reservation Object Overview
+!Edrivers/dma-buf/reservation.c
+!Iinclude/linux/reservation.h
+       </sect2>
+       <sect2><title>fence</title>
 !Edrivers/dma-buf/fence.c
-!Edrivers/dma-buf/seqno-fence.c
 !Iinclude/linux/fence.h
+!Edrivers/dma-buf/seqno-fence.c
 !Iinclude/linux/seqno-fence.h
-!Edrivers/dma-buf/reservation.c
-!Iinclude/linux/reservation.h
 !Edrivers/dma-buf/sync_file.c
 !Iinclude/linux/sync_file.h
+       </sect2>
+     </sect1>
+     <sect1><title>Device Drivers DMA Management</title>
 !Edrivers/base/dma-coherent.c
 !Edrivers/base/dma-mapping.c
      </sect1>

+ 1 - 0
Documentation/arm64/silicon-errata.txt

@@ -56,6 +56,7 @@ stable kernels.
 | ARM            | MMU-500         | #841119,#826419 | N/A                     |
 |                |                 |                 |                         |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |
+| Cavium         | ThunderX ITS    | #23144          | CAVIUM_ERRATUM_23144    |
 | Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154    |
 | Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456    |
 | Cavium         | ThunderX SMMUv2 | #27704          | N/A		       |

+ 1 - 0
Documentation/devicetree/bindings/display/imx/ldb.txt

@@ -62,6 +62,7 @@ Required properties:
    display-timings are used instead.
 
 Optional properties (required if display-timings are used):
+ - ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
  - display-timings : A node that describes the display timings as defined in
    Documentation/devicetree/bindings/display/display-timing.txt.
  - fsl,data-mapping : should be "spwg" or "jeida"

+ 15 - 130
Documentation/filesystems/devpts.txt

@@ -1,141 +1,26 @@
+Each mount of the devpts filesystem is now distinct such that ptys
+and their indicies allocated in one mount are independent from ptys
+and their indicies in all other mounts.
 
-To support containers, we now allow multiple instances of devpts filesystem,
-such that indices of ptys allocated in one instance are independent of indices
-allocated in other instances of devpts.
+All mounts of the devpts filesystem now create a /dev/pts/ptmx node
+with permissions 0000.
 
-To preserve backward compatibility, this support for multiple instances is
-enabled only if:
+To retain backwards compatibility the a ptmx device node (aka any node
+created with "mknod name c 5 2") when opened will look for an instance
+of devpts under the name "pts" in the same directory as the ptmx device
+node.
 
-	- CONFIG_DEVPTS_MULTIPLE_INSTANCES=y, and
-	- '-o newinstance' mount option is specified while mounting devpts
-
-IOW, devpts now supports both single-instance and multi-instance semantics.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=n, there is no change in behavior and
-this referred to as the "legacy" mode. In this mode, the new mount options
-(-o newinstance and -o ptmxmode) will be ignored with a 'bogus option' message
-on console.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and devpts is mounted without the
-'newinstance' option (as in current start-up scripts) the new mount binds
-to the initial kernel mount of devpts. This mode is referred to as the
-'single-instance' mode and the current, single-instance semantics are
-preserved, i.e PTYs are common across the system.
-
-The only difference between this single-instance mode and the legacy mode
-is the presence of new, '/dev/pts/ptmx' node with permissions 0000, which
-can safely be ignored.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and 'newinstance' option is specified,
-the mount is considered to be in the multi-instance mode and a new instance
-of the devpts fs is created. Any ptys created in this instance are independent
-of ptys in other instances of devpts. Like in the single-instance mode, the
-/dev/pts/ptmx node is present. To effectively use the multi-instance mode,
-open of /dev/ptmx must be a redirected to '/dev/pts/ptmx' using a symlink or
-bind-mount.
-
-Eg: A container startup script could do the following:
-
-	$ chmod 0666 /dev/pts/ptmx
-	$ rm /dev/ptmx
-	$ ln -s pts/ptmx /dev/ptmx
-	$ ns_exec -cm /bin/bash
-
-	# We are now in new container
-
-	$ umount /dev/pts
-	$ mount -t devpts -o newinstance lxcpts /dev/pts
-	$ sshd -p 1234
-
-where 'ns_exec -cm /bin/bash' calls clone() with CLONE_NEWNS flag and execs
-/bin/bash in the child process.  A pty created by the sshd is not visible in
-the original mount of /dev/pts.
+As an option instead of placing a /dev/ptmx device node at /dev/ptmx
+it is possible to place a symlink to /dev/pts/ptmx at /dev/ptmx or
+to bind mount /dev/ptx/ptmx to /dev/ptmx.  If you opt for using
+the devpts filesystem in this manner devpts should be mounted with
+the ptmxmode=0666, or chmod 0666 /dev/pts/ptmx should be called.
 
 Total count of pty pairs in all instances is limited by sysctls:
 kernel.pty.max = 4096		- global limit
-kernel.pty.reserve = 1024	- reserve for initial instance
+kernel.pty.reserve = 1024	- reserved for filesystems mounted from the initial mount namespace
 kernel.pty.nr			- current count of ptys
 
 Per-instance limit could be set by adding mount option "max=<count>".
 This feature was added in kernel 3.4 together with sysctl kernel.pty.reserve.
 In kernels older than 3.4 sysctl kernel.pty.max works as per-instance limit.
-
-User-space changes
-------------------
-
-In multi-instance mode (i.e '-o newinstance' mount option is specified at least
-once), following user-space issues should be noted.
-
-1. If -o newinstance mount option is never used, /dev/pts/ptmx can be ignored
-   and no change is needed to system-startup scripts.
-
-2. To effectively use multi-instance mode (i.e -o newinstance is specified)
-   administrators or startup scripts should "redirect" open of /dev/ptmx to
-   /dev/pts/ptmx using either a bind mount or symlink.
-
-	$ mount -t devpts -o newinstance devpts /dev/pts
-
-   followed by either
-
-	$ rm /dev/ptmx
-	$ ln -s pts/ptmx /dev/ptmx
-	$ chmod 666 /dev/pts/ptmx
-   or
-	$ mount -o bind /dev/pts/ptmx /dev/ptmx
-
-3. The '/dev/ptmx -> pts/ptmx' symlink is the preferred method since it
-   enables better error-reporting and treats both single-instance and
-   multi-instance mounts similarly.
-
-   But this method requires that system-startup scripts set the mode of
-   /dev/pts/ptmx correctly (default mode is 0000). The scripts can set the
-   mode by, either
-
-   	- adding ptmxmode mount option to devpts entry in /etc/fstab, or
-	- using 'chmod 0666 /dev/pts/ptmx'
-
-4. If multi-instance mode mount is needed for containers, but the system
-   startup scripts have not yet been updated, container-startup scripts
-   should bind mount /dev/ptmx to /dev/pts/ptmx to avoid breaking single-
-   instance mounts.
-
-   Or, in general, container-startup scripts should use:
-
-	mount -t devpts -o newinstance -o ptmxmode=0666 devpts /dev/pts
-	if [ ! -L /dev/ptmx ]; then
-		mount -o bind /dev/pts/ptmx /dev/ptmx
-	fi
-
-   When all devpts mounts are multi-instance, /dev/ptmx can permanently be
-   a symlink to pts/ptmx and the bind mount can be ignored.
-
-5. A multi-instance mount that is not accompanied by the /dev/ptmx to
-   /dev/pts/ptmx redirection would result in an unusable/unreachable pty.
-
-	mount -t devpts -o newinstance lxcpts /dev/pts
-
-   immediately followed by:
-
-	open("/dev/ptmx")
-
-    would create a pty, say /dev/pts/7, in the initial kernel mount.
-    But /dev/pts/7 would be invisible in the new mount.
-
-6. The permissions for /dev/pts/ptmx node should be specified when mounting
-   /dev/pts, using the '-o ptmxmode=%o' mount option (default is 0000).
-
-	mount -t devpts -o newinstance -o ptmxmode=0644 devpts /dev/pts
-
-   The permissions can be later be changed as usual with 'chmod'.
-
-	chmod 666 /dev/pts/ptmx
-
-7. A mount of devpts without the 'newinstance' option results in binding to
-   initial kernel mount.  This behavior while preserving legacy semantics,
-   does not provide strict isolation in a container environment. i.e by
-   mounting devpts without the 'newinstance' option, a container could
-   get visibility into the 'host' or root container's devpts.
-   
-   To workaround this and have strict isolation, all mounts of devpts,
-   including the mount in the root container, should use the newinstance
-   option.
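The behaviour the rewritten document describes can be exercised from userspace without touching /dev/pts directly; a minimal sketch using the standard POSIX pty calls (not part of this commit):

	#define _XOPEN_SOURCE 600
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		/* posix_openpt() opens /dev/ptmx; per the text above, the pty
		 * it allocates lives in the devpts instance mounted as "pts"
		 * next to whichever ptmx node was opened. */
		int master = posix_openpt(O_RDWR | O_NOCTTY);

		if (master < 0 || grantpt(master) < 0 || unlockpt(master) < 0)
			return 1;
		printf("slave pty: %s\n", ptsname(master));	/* e.g. /dev/pts/3 */
		return 0;
	}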

+ 82 - 11
Documentation/kdump/gdbmacros.txt

@@ -170,21 +170,92 @@ document trapinfo
 	address the kernel panicked.
 end
 
+define dump_log_idx
+	set $idx = $arg0
+	if ($argc > 1)
+		set $prev_flags = $arg1
+	else
+		set $prev_flags = 0
+	end
+	set $msg = ((struct printk_log *) (log_buf + $idx))
+	set $prefix = 1
+	set $newline = 1
+	set $log = log_buf + $idx + sizeof(*$msg)
 
-define dmesg
-	set $i = 0
-	set $end_idx = (log_end - 1) & (log_buf_len - 1)
+	# prev & LOG_CONT && !(msg->flags & LOG_PREIX)
+	if (($prev_flags & 8) && !($msg->flags & 4))
+		set $prefix = 0
+	end
+
+	# msg->flags & LOG_CONT
+	if ($msg->flags & 8)
+		# (prev & LOG_CONT && !(prev & LOG_NEWLINE))
+		if (($prev_flags & 8) && !($prev_flags & 2))
+			set $prefix = 0
+		end
+		# (!(msg->flags & LOG_NEWLINE))
+		if (!($msg->flags & 2))
+			set $newline = 0
+		end
+	end
+
+	if ($prefix)
+		printf "[%5lu.%06lu] ", $msg->ts_nsec / 1000000000, $msg->ts_nsec % 1000000000
+	end
+	if ($msg->text_len != 0)
+		eval "printf \"%%%d.%ds\", $log", $msg->text_len, $msg->text_len
+	end
+	if ($newline)
+		printf "\n"
+	end
+	if ($msg->dict_len > 0)
+		set $dict = $log + $msg->text_len
+		set $idx = 0
+		set $line = 1
+		while ($idx < $msg->dict_len)
+			if ($line)
+				printf " "
+				set $line = 0
+			end
+			set $c = $dict[$idx]
+			if ($c == '\0')
+				printf "\n"
+				set $line = 1
+			else
+				if ($c < ' ' || $c >= 127 || $c == '\\')
+					printf "\\x%02x", $c
+				else
+					printf "%c", $c
+				end
+			end
+			set $idx = $idx + 1
+		end
+		printf "\n"
+	end
+end
+document dump_log_idx
+	Dump a single log given its index in the log buffer.  The first
+	parameter is the index into log_buf, the second is optional and
+	specified the previous log buffer's flags, used for properly
+	formatting continued lines.
+end
 
-	while ($i < logged_chars)
-		set $idx = (log_end - 1 - logged_chars + $i) & (log_buf_len - 1)
+define dmesg
+	set $i = log_first_idx
+	set $end_idx = log_first_idx
+	set $prev_flags = 0
 
-		if ($idx + 100 <= $end_idx) || \
-		   ($end_idx <= $idx && $idx + 100 < log_buf_len)
-			printf "%.100s", &log_buf[$idx]
-			set $i = $i + 100
+	while (1)
+		set $msg = ((struct printk_log *) (log_buf + $i))
+		if ($msg->len == 0)
+			set $i = 0
 		else
-			printf "%c", log_buf[$idx]
-			set $i = $i + 1
+			dump_log_idx $i $prev_flags
+			set $i = $i + $msg->len
+			set $prev_flags = $msg->flags
+		end
+		if ($i == $end_idx)
+			loop_break
 		end
 	end
 end
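For readers following the macro logic: the record header it walks is struct printk_log from kernel/printk/printk.c in this release (reproduced here for reference, not part of the diff). The hard-coded flag values 8, 4 and 2 correspond to LOG_CONT, LOG_PREFIX and LOG_NEWLINE.

	struct printk_log {
		u64 ts_nsec;		/* timestamp in nanoseconds */
		u16 len;		/* length of entire record */
		u16 text_len;		/* length of text buffer */
		u16 dict_len;		/* length of dictionary buffer */
		u8 facility;		/* syslog facility */
		u8 flags:5;		/* internal record flags */
		u8 level:3;		/* syslog level */
	};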

+ 4 - 1
Documentation/security/keys.txt

@@ -826,7 +826,8 @@ The keyctl syscall functions are:
  (*) Compute a Diffie-Hellman shared secret or public key
 
        long keyctl(KEYCTL_DH_COMPUTE, struct keyctl_dh_params *params,
-		   char *buffer, size_t buflen);
+		   char *buffer, size_t buflen,
+		   void *reserved);
 
      The params struct contains serial numbers for three keys:
 
@@ -843,6 +844,8 @@ The keyctl syscall functions are:
      public key.  If the base is the remote public key, the result is
      the shared secret.
 
+     The reserved argument must be set to NULL.
+
      The buffer length must be at least the length of the prime, or zero.
 
      If the buffer length is nonzero, the length of the result is
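A hedged userspace sketch of the updated call, issued as a raw syscall since libkeyutils wrappers may lag behind; the struct layout follows include/uapi/linux/keyctl.h:

	#include <stdint.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#define KEYCTL_DH_COMPUTE 23		/* from linux/keyctl.h */

	struct keyctl_dh_params {
		int32_t private;		/* our private key */
		int32_t prime;			/* key holding the prime p */
		int32_t base;			/* base g, or the remote public key */
	};

	static long dh_compute(struct keyctl_dh_params *params,
			       char *buffer, size_t buflen)
	{
		/* The fifth argument is the new "reserved" pointer and, as
		 * documented above, must be NULL. */
		return syscall(__NR_keyctl, KEYCTL_DH_COMPUTE,
			       params, buffer, buflen, NULL);
	}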

+ 3 - 0
MAINTAINERS

@@ -3094,6 +3094,7 @@ M:	Stephen Boyd <sboyd@codeaurora.org>
 L:	linux-clk@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
 S:	Maintained
+F:	Documentation/devicetree/bindings/clock/
 F:	drivers/clk/
 X:	drivers/clk/clkdev.c
 F:	include/linux/clk-pr*
@@ -8023,6 +8024,7 @@ Q:	http://patchwork.kernel.org/project/linux-wireless/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git
 S:	Maintained
+F:	Documentation/devicetree/bindings/net/wireless/
 F:	drivers/net/wireless/
 
 NETXEN (1/10) GbE SUPPORT
@@ -8960,6 +8962,7 @@ M:	Linus Walleij <linus.walleij@linaro.org>
 L:	linux-gpio@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
 S:	Maintained
+F:	Documentation/devicetree/bindings/pinctrl/
 F:	drivers/pinctrl/
 F:	include/linux/pinctrl/
 

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*

+ 4 - 27
arch/arc/Kconfig

@@ -61,7 +61,7 @@ config RWSEM_GENERIC_SPINLOCK
 	def_bool y
 
 config ARCH_DISCONTIGMEM_ENABLE
-	def_bool y
+	def_bool n
 
 config ARCH_FLATMEM_ENABLE
 	def_bool y
@@ -186,9 +186,6 @@ if SMP
 config ARC_HAS_COH_CACHES
 	def_bool n
 
-config ARC_HAS_REENTRANT_IRQ_LV2
-	def_bool n
-
 config ARC_MCIP
 	bool "ARConnect Multicore IP (MCIP) Support "
 	depends on ISA_ARCV2
@@ -366,25 +363,10 @@ config NODES_SHIFT
 if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
-	bool "ARCompact IRQ Priorities: High(2)/Low(1)"
+	bool "Setup Timer IRQ as high Priority"
 	default n
-	# Timer HAS to be high priority, for any other high priority config
-	select ARC_IRQ3_LV2
 	# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
-	depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
-
-if ARC_COMPACT_IRQ_LEVELS
-
-config ARC_IRQ3_LV2
-	bool
-
-config ARC_IRQ5_LV2
-	bool
-
-config ARC_IRQ6_LV2
-	bool
-
-endif	#ARC_COMPACT_IRQ_LEVELS
+	depends on !SMP
 
 config ARC_FPU_SAVE_RESTORE
 	bool "Enable FPU state persistence across context switch"
@@ -407,11 +389,6 @@ config ARC_HAS_LLSC
 	default y
 	depends on !ARC_CANT_LLSC
 
-config ARC_STAR_9000923308
-	bool "Workaround for llock/scond livelock"
-	default n
-	depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
-
 config ARC_HAS_SWAPE
 	bool "Insn: SWAPE (endian-swap)"
 	default y
@@ -471,7 +448,7 @@ config LINUX_LINK_BASE
 
 config HIGHMEM
 	bool "High Memory Support"
-	select DISCONTIGMEM
+	select ARCH_DISCONTIGMEM_ENABLE
 	help
 	  With ARC 2G:2G address split, only upper 2G is directly addressable by
 	  kernel. Enable this to potentially allow access to rest of 2G and PAE

+ 1 - 1
arch/arc/Makefile

@@ -127,7 +127,7 @@ libs-y		+= arch/arc/lib/ $(LIBGCC)
 
 boot		:= arch/arc/boot
 
-#default target for make without any arguements.
+#default target for make without any arguments.
 KBUILD_IMAGE	:= bootpImage
 
 all:	$(KBUILD_IMAGE)

+ 0 - 2
arch/arc/boot/dts/abilis_tb100.dtsi

@@ -23,8 +23,6 @@
 
 
 / {
-	clock-frequency		= <500000000>;	/* 500 MHZ */
-
 	soc100 {
 		bus-frequency	= <166666666>;
 

+ 0 - 2
arch/arc/boot/dts/abilis_tb101.dtsi

@@ -23,8 +23,6 @@
 
 
 / {
-	clock-frequency		= <500000000>;	/* 500 MHZ */
-
 	soc100 {
 		bus-frequency	= <166666666>;
 

+ 0 - 1
arch/arc/boot/dts/axc001.dtsi

@@ -15,7 +15,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <750000000>;	/* 750 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 

+ 0 - 1
arch/arc/boot/dts/axc003.dtsi

@@ -14,7 +14,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <90000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
 

+ 0 - 1
arch/arc/boot/dts/axc003_idu.dtsi

@@ -14,7 +14,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <90000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
 

+ 0 - 1
arch/arc/boot/dts/eznps.dts

@@ -18,7 +18,6 @@
 
 / {
 	compatible = "ezchip,arc-nps";
-	clock-frequency = <83333333>;	/* 83.333333 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&intc>;

+ 0 - 1
arch/arc/boot/dts/nsim_700.dts

@@ -11,7 +11,6 @@
 
 / {
 	compatible = "snps,nsim";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;

+ 0 - 1
arch/arc/boot/dts/nsimosci.dts

@@ -11,7 +11,6 @@
 
 / {
 	compatible = "snps,nsimosci";
-	clock-frequency = <20000000>;	/* 20 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;

+ 0 - 1
arch/arc/boot/dts/nsimosci_hs.dts

@@ -11,7 +11,6 @@
 
 / {
 	compatible = "snps,nsimosci_hs";
-	clock-frequency = <20000000>;	/* 20 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;

+ 0 - 1
arch/arc/boot/dts/nsimosci_hs_idu.dts

@@ -11,7 +11,6 @@
 
 / {
 	compatible = "snps,nsimosci_hs";
-	clock-frequency = <5000000>;	/* 5 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;

+ 0 - 1
arch/arc/boot/dts/skeleton.dtsi

@@ -13,7 +13,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	chosen { };

+ 0 - 1
arch/arc/boot/dts/skeleton_hs.dtsi

@@ -8,7 +8,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	chosen { };

+ 0 - 1
arch/arc/boot/dts/skeleton_hs_idu.dtsi

@@ -8,7 +8,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	chosen { };

+ 0 - 1
arch/arc/boot/dts/vdk_axc003.dtsi

@@ -14,7 +14,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <50000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
 

+ 0 - 1
arch/arc/boot/dts/vdk_axc003_idu.dtsi

@@ -15,7 +15,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <50000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
 

+ 4 - 41
arch/arc/include/asm/atomic.h

@@ -25,50 +25,17 @@
 
 #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
-#ifdef CONFIG_ARC_STAR_9000923308
-
-#define SCOND_FAIL_RETRY_VAR_DEF						\
-	unsigned int delay = 1, tmp;						\
-
-#define SCOND_FAIL_RETRY_ASM							\
-	"	bz	4f			\n"				\
-	"   ; --- scond fail delay ---		\n"				\
-	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
-	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
-	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
-	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
-	"	b	1b			\n"	/* start over */	\
-	"4: ; --- success ---			\n"				\
-
-#define SCOND_FAIL_RETRY_VARS							\
-	  ,[delay] "+&r" (delay),[tmp] "=&r"	(tmp)				\
-
-#else	/* !CONFIG_ARC_STAR_9000923308 */
-
-#define SCOND_FAIL_RETRY_VAR_DEF
-
-#define SCOND_FAIL_RETRY_ASM							\
-	"	bnz     1b			\n"				\
-
-#define SCOND_FAIL_RETRY_VARS
-
-#endif
-
 #define ATOMIC_OP(op, c_op, asm_op)					\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
-	unsigned int val;				                \
-	SCOND_FAIL_RETRY_VAR_DEF                                        \
+	unsigned int val;						\
 									\
 	__asm__ __volatile__(						\
 	"1:	llock   %[val], [%[ctr]]		\n"		\
 	"	" #asm_op " %[val], %[val], %[i]	\n"		\
 	"	scond   %[val], [%[ctr]]		\n"		\
-	"						\n"		\
-	SCOND_FAIL_RETRY_ASM						\
-									\
+	"	bnz     1b				\n"		\
 	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
-	  SCOND_FAIL_RETRY_VARS						\
 	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
 	  [i]	"ir"	(i)						\
 	: "cc");							\
@@ -77,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v)			\
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
 static inline int atomic_##op##_return(int i, atomic_t *v)		\
 {									\
-	unsigned int val;				                \
-	SCOND_FAIL_RETRY_VAR_DEF                                        \
+	unsigned int val;						\
 									\
 	/*								\
 	 * Explicit full memory barrier needed before/after as		\
@@ -90,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v)		\
 	"1:	llock   %[val], [%[ctr]]		\n"		\
 	"	" #asm_op " %[val], %[val], %[i]	\n"		\
 	"	scond   %[val], [%[ctr]]		\n"		\
-	"						\n"		\
-	SCOND_FAIL_RETRY_ASM						\
-									\
+	"	bnz     1b				\n"		\
 	: [val]	"=&r"	(val)						\
-	  SCOND_FAIL_RETRY_VARS						\
 	: [ctr]	"r"	(&v->counter),					\
 	  [i]	"ir"	(i)						\
 	: "cc");							\

+ 2 - 2
arch/arc/include/asm/entry-compact.h

@@ -76,8 +76,8 @@
 	 * We need to be a bit more cautious here. What if a kernel bug in
 	 * L1 ISR, caused SP to go whaco (some small value which looks like
 	 * USER stk) and then we take L2 ISR.
-	 * Above brlo alone would treat it as a valid L1-L2 sceanrio
-	 * instead of shouting alound
+	 * Above brlo alone would treat it as a valid L1-L2 scenario
+	 * instead of shouting around
 	 * The only feasible way is to make sure this L2 happened in
 	 * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
 	 * L1 ISR before it switches stack

+ 1 - 1
arch/arc/include/asm/mmu_context.h

@@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 		local_flush_tlb_all();
 
 		/*
-		 * Above checke for rollover of 8 bit ASID in 32 bit container.
+		 * Above check for rollover of 8 bit ASID in 32 bit container.
 		 * If the container itself wrapped around, set it to a non zero
 		 * "generation" to distinguish from no context
 		 */

+ 1 - 1
arch/arc/include/asm/pgtable.h

@@ -47,7 +47,7 @@
  * Page Tables are purely for Linux VM's consumption and the bits below are
  * suited to that (uniqueness). Hence some are not implemented in the TLB and
  * some have different value in TLB.
- * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
  *      seperate PD0 and PD1, which combined forms a translation entry)
  *      while for PTE perspective, they are 8 and 9 respectively
  * with MMU v3: Most bits (except SHARED) represent the exact hardware pos

+ 1 - 1
arch/arc/include/asm/processor.h

@@ -78,7 +78,7 @@ struct task_struct;
 #define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
 
 /*
- * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * Where about of Task's sp, fp, blink when it was last seen in kernel mode.
  * Look in process.c for details of kernel stack layout
  */
 #define TSK_K_ESP(tsk)		(tsk->thread.ksp)

+ 1 - 1
arch/arc/include/asm/smp.h

@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
  * (1) These insn were introduced only in 4.10 release. So for older released
  *	support needed.
  *
- * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
  *	gaurantted by the platform (not something which core handles).
  *	Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
  *	disabling for atomicity.

+ 0 - 292
arch/arc/include/asm/spinlock.h

@@ -20,11 +20,6 @@
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
-/*
- * A normal LLOCK/SCOND based system, w/o need for livelock workaround
- */
-#ifndef CONFIG_ARC_STAR_9000923308
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned int val;
@@ -238,293 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	smp_mb();
 }
 
-#else	/* CONFIG_ARC_STAR_9000923308 */
-
-/*
- * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
- * coherency transactions in the SCU. The exclusive line state keeps rotating
- * among contenting cores leading to a never ending cycle. So break the cycle
- * by deferring the retry of failed exclusive access (SCOND). The actual delay
- * needed is function of number of contending cores as well as the unrelated
- * coherency traffic from other cores. To keep the code simple, start off with
- * small delay of 1 which would suffice most cases and in case of contention
- * double the delay. Eventually the delay is sufficient such that the coherency
- * pipeline is drained, thus a subsequent exclusive access would succeed.
- */
-
-#define SCOND_FAIL_RETRY_VAR_DEF						\
-	unsigned int delay, tmp;						\
-
-#define SCOND_FAIL_RETRY_ASM							\
-	"   ; --- scond fail delay ---		\n"				\
-	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
-	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
-	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
-	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
-	"	b	1b			\n"	/* start over */	\
-	"					\n"				\
-	"4: ; --- done ---			\n"				\
-
-#define SCOND_FAIL_RETRY_VARS							\
-	  ,[delay] "=&r" (delay), [tmp] "=&r"	(tmp)				\
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[slock]]	\n"
-	"	breq	%[val], %[LOCKED], 0b	\n"	/* spin while LOCKED */
-	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
-	"	bz	4f			\n"	/* done */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock]	"r"	(&(lock->slock)),
-	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[slock]]	\n"
-	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
-	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock]	"r"	(&(lock->slock)),
-	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	smp_mb();
-
-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
-	smp_mb();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * zero means writer holds the lock exclusively, deny Reader.
-	 * Otherwise grant lock to first/subseq reader
-	 *
-	 * 	if (rw->counter > 0) {
-	 *		rw->counter--;
-	 *		ret = 1;
-	 *	}
-	 */
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brls	%[val], %[WR_LOCKED], 0b\n"	/* <= 0: spin while write locked */
-	"	sub	%[val], %[val], 1	\n"	/* reader lock */
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz	4f			\n"	/* done */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
-	"	sub	%[val], %[val], 1	\n"	/* counter-- */
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
-	 * deny writer. Otherwise if unlocked grant to writer
-	 * Hence the claim that Linux rwlocks are unfair to writers.
-	 * (can be starved for an indefinite time by readers).
-	 *
-	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
-	 *		rw->counter = 0;
-	 *		ret = 1;
-	 *	}
-	 */
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brne	%[val], %[UNLOCKED], 0b	\n"	/* while !UNLOCKED spin */
-	"	mov	%[val], %[WR_LOCKED]	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz	4f			\n"
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
-	"	mov	%[val], %[WR_LOCKED]	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter++;
-	 */
-	__asm__ __volatile__(
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	add	%[val], %[val], 1	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bnz	1b			\n"
-	"					\n"
-	: [val]		"=&r"	(val)
-	: [rwlock]	"r"	(&(rw->counter))
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-	 */
-	__asm__ __volatile__(
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	scond	%[UNLOCKED], [%[rwlock]]\n"
-	"	bnz	1b			\n"
-	"					\n"
-	: [val]		"=&r"	(val)
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"r"	(__ARCH_RW_LOCK_UNLOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
-
-#endif	/* CONFIG_ARC_STAR_9000923308 */
-
 #else	/* !CONFIG_ARC_HAS_LLSC */
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)

+ 1 - 1
arch/arc/include/asm/thread_info.h

@@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
 
 /*
  * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
- * SYSCALL_TRACE is anways seperately/unconditionally tested right after a
+ * SYSCALL_TRACE is anyway seperately/unconditionally tested right after a
  * syscall, so all that reamins to be tested is _TIF_WORK_MASK
  */
 

+ 1 - 1
arch/arc/include/asm/uaccess.h

@@ -32,7 +32,7 @@
 #define __kernel_ok		(segment_eq(get_fs(), KERNEL_DS))
 
 /*
- * Algorthmically, for __user_ok() we want do:
+ * Algorithmically, for __user_ok() we want do:
  * 	(start < TASK_SIZE) && (start+len < TASK_SIZE)
  * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
  * emitted directly in code.

+ 1 - 1
arch/arc/include/uapi/asm/swab.h

@@ -74,7 +74,7 @@
 	__tmp ^ __in;						\
 })
 
-#elif (ARC_BSWAP_TYPE == 2)	/* Custom single cycle bwap instruction */
+#elif (ARC_BSWAP_TYPE == 2)	/* Custom single cycle bswap instruction */
 
 #define __arch_swab32(x)						\
 ({									\

+ 2 - 16
arch/arc/kernel/entry-compact.S

@@ -91,27 +91,13 @@ VECTOR   mem_service             ; 0x8, Mem exception   (0x1)
 VECTOR   instr_service           ; 0x10, Instrn Error   (0x2)
 
 ; ******************** Device ISRs **********************
-#ifdef CONFIG_ARC_IRQ3_LV2
-VECTOR   handle_interrupt_level2
-#else
-VECTOR   handle_interrupt_level1
-#endif
-
-VECTOR   handle_interrupt_level1
-
-#ifdef CONFIG_ARC_IRQ5_LV2
-VECTOR   handle_interrupt_level2
-#else
-VECTOR   handle_interrupt_level1
-#endif
-
-#ifdef CONFIG_ARC_IRQ6_LV2
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
 VECTOR   handle_interrupt_level2
 #else
 VECTOR   handle_interrupt_level1
 #endif
 
-.rept   25
+.rept   28
 VECTOR   handle_interrupt_level1 ; Other devices
 .endr
 

+ 2 - 4
arch/arc/kernel/intc-compact.c

@@ -28,10 +28,8 @@ void arc_init_IRQ(void)
 {
 	int level_mask = 0;
 
-       /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
-	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
-	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
-	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
+       /* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
+	level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
 
 	/*
 	 * Write to register, even if no LV2 IRQs configured to reset it
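A quick reader's check of the level_mask arithmetic above; TIMER0_IRQ is assumed to be 3 here (its classic ARCompact value), which is not shown in this diff:

	#include <stdio.h>

	int main(void)
	{
		int enabled = 1;	/* CONFIG_ARC_COMPACT_IRQ_LEVELS=y */
		int timer0_irq = 3;	/* assumed value, for illustration */

		/* IS_ENABLED() yields 1 or 0, so at most the timer's bit is set. */
		printf("level_mask = 0x%x\n", enabled << timer0_irq);	/* 0x8 */
		return 0;
	}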

+ 1 - 1
arch/arc/kernel/perf_event.c

@@ -108,7 +108,7 @@ static void arc_perf_event_update(struct perf_event *event,
 	int64_t delta = new_raw_count - prev_raw_count;
 
 	/*
-	 * We don't afaraid of hwc->prev_count changing beneath our feet
+	 * We aren't afraid of hwc->prev_count changing beneath our feet
 	 * because there's no way for us to re-enter this function anytime.
 	 */
 	local64_set(&hwc->prev_count, new_raw_count);

+ 1 - 1
arch/arc/kernel/setup.c

@@ -392,7 +392,7 @@ void __init setup_arch(char **cmdline_p)
 		/*
 		 * If we are here, it is established that @uboot_arg didn't
 		 * point to DT blob. Instead if u-boot says it is cmdline,
-		 * Appent to embedded DT cmdline.
+		 * append to embedded DT cmdline.
 		 * setup_machine_fdt() would have populated @boot_command_line
 		 */
 		if (uboot_tag == 1) {

+ 1 - 1
arch/arc/kernel/signal.c

@@ -34,7 +34,7 @@
  *  -ViXS were still seeing crashes when using insmod to load drivers.
  *   It turned out that the code to change Execute permssions for TLB entries
  *   of user was not guarded for interrupts (mod_tlb_permission)
- *   This was cauing TLB entries to be overwritten on unrelated indexes
+ *   This was causing TLB entries to be overwritten on unrelated indexes
  *
  * Vineetg: July 15th 2008: Bug #94183
  *  -Exception happens in Delay slot of a JMP, and before user space resumes,

+ 1 - 1
arch/arc/kernel/troubleshoot.c

@@ -276,7 +276,7 @@ static int tlb_stats_open(struct inode *inode, struct file *file)
 	return 0;
 }
 
-/* called on user read(): display the couters */
+/* called on user read(): display the counters */
 static ssize_t tlb_stats_output(struct file *file,	/* file descriptor */
 				char __user *user_buf,	/* user buffer */
 				size_t len,		/* length of buffer */

+ 3 - 3
arch/arc/mm/cache.c

@@ -215,7 +215,7 @@ slc_chk:
  * ------------------
  * This ver of MMU supports variable page sizes (1k-16k): although Linux will
  * only support 8k (default), 16k and 4k.
- * However from hardware perspective, smaller page sizes aggrevate aliasing
+ * However from hardware perspective, smaller page sizes aggravate aliasing
  * meaning more vaddr bits needed to disambiguate the cache-line-op ;
  * the existing scheme of piggybacking won't work for certain configurations.
  * Two new registers IC_PTAG and DC_PTAG inttoduced.
@@ -302,7 +302,7 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
 
 	/*
 	 * This is technically for MMU v4, using the MMU v3 programming model
-	 * Special work for HS38 aliasing I-cache configuratino with PAE40
+	 * Special work for HS38 aliasing I-cache configuration with PAE40
 	 *   - upper 8 bits of paddr need to be written into PTAG_HI
 	 *   - (and needs to be written before the lower 32 bits)
 	 * Note that PTAG_HI is hoisted outside the line loop
@@ -936,7 +936,7 @@ void arc_cache_init(void)
 			      ic->ver, CONFIG_ARC_MMU_VER);
 
 		/*
-		 * In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG
+		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
 		 * pair to provide vaddr/paddr respectively, just as in MMU v3
 		 */
 		if (is_isa_arcv2() && ic->alias)

+ 1 - 1
arch/arc/mm/dma.c

@@ -10,7 +10,7 @@
  * DMA Coherent API Notes
  *
  * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessintg it using a kernel virtual address, with
+ * implemented by accessing it using a kernel virtual address, with
  * Cache bit off in the TLB entry.
  *
  * The default DMA address == Phy address which is 0x8000_0000 based.

+ 1 - 1
arch/arm/kernel/ptrace.c

@@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
 	if (ret)
 		return ret;
 
-	vfp_flush_hwstate(thread);
 	thread->vfpstate.hard = new_vfp;
+	vfp_flush_hwstate(thread);
 
 	return 0;
 }

+ 1 - 1
arch/arm/mach-vexpress/spc.c

@@ -547,7 +547,7 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
 
 	init.name = dev_name(cpu_dev);
 	init.ops = &clk_spc_ops;
-	init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
+	init.flags = CLK_GET_RATE_NOCACHE;
 	init.num_parents = 0;
 
 	return devm_clk_register(cpu_dev, &spc->hw);

+ 21 - 0
arch/arm64/Kconfig

@@ -113,6 +113,18 @@ config ARCH_PHYS_ADDR_T_64BIT
 config MMU
 	def_bool y
 
+config ARM64_PAGE_SHIFT
+	int
+	default 16 if ARM64_64K_PAGES
+	default 14 if ARM64_16K_PAGES
+	default 12
+
+config ARM64_CONT_SHIFT
+	int
+	default 5 if ARM64_64K_PAGES
+	default 7 if ARM64_16K_PAGES
+	default 4
+
 config ARCH_MMAP_RND_BITS_MIN
        default 14 if ARM64_64K_PAGES
        default 16 if ARM64_16K_PAGES
@@ -426,6 +438,15 @@ config CAVIUM_ERRATUM_22375
 
 	  If unsure, say Y.
 
+config CAVIUM_ERRATUM_23144
+	bool "Cavium erratum 23144: ITS SYNC hang on dual socket system"
+	depends on NUMA
+	default y
+	help
+	  ITS SYNC command hang for cross node io and collections/cpu mapping.
+
+	  If unsure, say Y.
+
 config CAVIUM_ERRATUM_23154
 	bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
 	default y

+ 13 - 12
arch/arm64/Kconfig.debug

@@ -12,7 +12,8 @@ config ARM64_PTDUMP
 	  who are working in architecture specific areas of the kernel.
 	  It is probably not a good idea to enable this feature in a production
 	  kernel.
-	  If in doubt, say "N"
+
+	  If in doubt, say N.
 
 config PID_IN_CONTEXTIDR
 	bool "Write the current PID to the CONTEXTIDR register"
@@ -38,15 +39,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
 	  value.
 
 config DEBUG_SET_MODULE_RONX
-        bool "Set loadable kernel module data as NX and text as RO"
-        depends on MODULES
-        help
-          This option helps catch unintended modifications to loadable
-          kernel module's text and read-only data. It also prevents execution
-          of module data. Such protection may interfere with run-time code
-          patching and dynamic kernel tracing - and they might also protect
-          against certain classes of kernel exploits.
-          If in doubt, say "N".
+	bool "Set loadable kernel module data as NX and text as RO"
+	depends on MODULES
+	default y
+	help
+	  Is this is set, kernel module text and rodata will be made read-only.
+	  This is to help catch accidental or malicious attempts to change the
+	  kernel's executable code.
+
+	  If in doubt, say Y.
 
 config DEBUG_RODATA
 	bool "Make kernel text and rodata read-only"
@@ -56,7 +57,7 @@ config DEBUG_RODATA
 	  is to help catch accidental or malicious attempts to change the
 	  kernel's executable code.
 
-	  If in doubt, say Y
+	  If in doubt, say Y.
 
 config DEBUG_ALIGN_RODATA
 	depends on DEBUG_RODATA
@@ -69,7 +70,7 @@ config DEBUG_ALIGN_RODATA
 	  alignment and potentially wasted space. Turn on this option if
 	  performance is more important than memory pressure.
 
-	  If in doubt, say N
+	  If in doubt, say N.
 
 
 source "drivers/hwtracing/coresight/Kconfig"
 source "drivers/hwtracing/coresight/Kconfig"
 
 

+ 3 - 1
arch/arm64/Makefile

@@ -60,7 +60,9 @@ head-y		:= arch/arm64/kernel/head.o
 
 # The byte offset of the kernel image in RAM from the start of RAM.
 ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
+TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
+		 int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
+		 rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
else
TEXT_OFFSET := 0x00080000
endif
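
The new TEXT_OFFSET expression picks a page-aligned random offset below 2MiB, where the old one was always 4K-aligned. A standalone C sketch of the same arithmetic (rand() stands in for awk's rand(); illustrative only):

	#include <stdio.h>
	#include <stdlib.h>

	static unsigned long random_text_offset(int page_shift)
	{
		/* number of whole pages in 2MiB, as in the awk expression */
		unsigned long pages = (2UL * 1024 * 1024) >> page_shift;

		/* pick one at random and scale back up: page-aligned offset */
		return ((unsigned long)rand() % pages) << page_shift;
	}

	int main(void)
	{
		printf("0x%06lx\n", random_text_offset(16));	/* 64K pages */
		return 0;
	}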

+ 2 - 2
arch/arm64/include/asm/elf.h

@@ -160,14 +160,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 #define STACK_RND_MASK			(0x3ffff >> (PAGE_SHIFT - 12))
 #endif
 
-#ifdef CONFIG_COMPAT
-
 #ifdef __AARCH64EB__
 #define COMPAT_ELF_PLATFORM		("v8b")
 #else
 #define COMPAT_ELF_PLATFORM		("v8l")
 #endif
 
+#ifdef CONFIG_COMPAT
+
 #define COMPAT_ELF_ET_DYN_BASE		(2 * TASK_SIZE_32 / 3)
 
 /* AArch32 registers. */

+ 2 - 1
arch/arm64/include/asm/memory.h

@@ -55,8 +55,9 @@
 #define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
 
 /*
- * PAGE_OFFSET - the virtual address of the start of the kernel image (top
+ * PAGE_OFFSET - the virtual address of the start of the linear map (top
  *		 (VA_BITS - 1))
+ * KIMAGE_VADDR - the virtual address of the start of the kernel image
  * VA_BITS - the maximum number of bits for virtual addresses.
  * VA_START - the first kernel virtual address.
  * TASK_SIZE - the maximum size of a user space task.

+ 2 - 10
arch/arm64/include/asm/page.h

@@ -23,16 +23,8 @@
 
 /* PAGE_SHIFT determines the page size */
 /* CONT_SHIFT determines the number of pages which can be tracked together  */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define PAGE_SHIFT		16
-#define CONT_SHIFT		5
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define PAGE_SHIFT		14
-#define CONT_SHIFT		7
-#else
-#define PAGE_SHIFT		12
-#define CONT_SHIFT		4
-#endif
+#define PAGE_SHIFT		CONFIG_ARM64_PAGE_SHIFT
+#define CONT_SHIFT		CONFIG_ARM64_CONT_SHIFT
 #define PAGE_SIZE		(_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK		(~(PAGE_SIZE-1))
 

+ 0 - 13
arch/arm64/include/asm/uaccess.h

@@ -80,19 +80,6 @@ static inline void set_fs(mm_segment_t fs)
 
 #define segment_eq(a, b)	((a) == (b))
 
-/*
- * Return 1 if addr < current->addr_limit, 0 otherwise.
- */
-#define __addr_ok(addr)							\
-({									\
-	unsigned long flag;						\
-	asm("cmp %1, %0; cset %0, lo"					\
-		: "=&r" (flag)						\
-		: "r" (addr), "0" (current_thread_info()->addr_limit)	\
-		: "cc");						\
-	flag;								\
-})
-
 /*
  * Test whether a block of memory is a valid user space address.
  * Returns 1 if the range is valid, 0 otherwise.

+ 1 - 1
arch/arm64/include/asm/unistd.h

@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls		390
+#define __NR_compat_syscalls		394
 #endif
 
 #define __ARCH_WANT_SYS_CLONE

+ 8 - 0
arch/arm64/include/asm/unistd32.h

@@ -801,6 +801,14 @@ __SYSCALL(__NR_execveat, compat_sys_execveat)
 __SYSCALL(__NR_userfaultfd, sys_userfaultfd)
 #define __NR_membarrier 389
 __SYSCALL(__NR_membarrier, sys_membarrier)
+#define __NR_mlock2 390
+__SYSCALL(__NR_mlock2, sys_mlock2)
+#define __NR_copy_file_range 391
+__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
+#define __NR_preadv2 392
+__SYSCALL(__NR_preadv2, compat_sys_preadv2)
+#define __NR_pwritev2 393
+__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
 
 /*
  * Please add new compat syscalls above this comment and update

+ 7 - 1
arch/arm64/kernel/cpuinfo.c

@@ -22,6 +22,8 @@
 
 #include <linux/bitops.h>
 #include <linux/bug.h>
+#include <linux/compat.h>
+#include <linux/elf.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/personality.h>
@@ -104,6 +106,7 @@ static const char *const compat_hwcap2_str[] = {
 static int c_show(struct seq_file *m, void *v)
 {
 	int i, j;
+	bool compat = personality(current->personality) == PER_LINUX32;
 
 	for_each_online_cpu(i) {
 		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
@@ -115,6 +118,9 @@ static int c_show(struct seq_file *m, void *v)
 		 * "processor".  Give glibc what it expects.
 		 */
 		seq_printf(m, "processor\t: %d\n", i);
+		if (compat)
+			seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
+				   MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
 
 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
 			   loops_per_jiffy / (500000UL/HZ),
@@ -127,7 +133,7 @@ static int c_show(struct seq_file *m, void *v)
 		 * software which does already (at least for 32-bit).
 		 */
 		seq_puts(m, "Features\t:");
-		if (personality(current->personality) == PER_LINUX32) {
+		if (compat) {
 #ifdef CONFIG_COMPAT
 			for (j = 0; compat_hwcap_str[j]; j++)
 				if (compat_elf_hwcap & (1 << j))
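
With this change, a reader with a PER_LINUX32 personality also gets a model name line, assembled from the format string above and the COMPAT_ELF_PLATFORM strings (v8l/v8b) moved outside CONFIG_COMPAT in the elf.h hunk earlier. The revision value below is illustrative:

	model name	: ARMv8 Processor rev 2 (v8l)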

+ 3 - 2
arch/arm64/kernel/traps.c

@@ -477,8 +477,9 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 	void __user *pc = (void __user *)instruction_pointer(regs);
 	console_verbose();
 
-	pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
-		handler[reason], esr, esr_get_class_string(esr));
+	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
+		handler[reason], smp_processor_id(), esr,
+		esr_get_class_string(esr));
 	__show_regs(regs);
 
 	info.si_signo = SIGILL;

+ 21 - 15
arch/arm64/kvm/hyp/vgic-v3-sr.c

@@ -169,7 +169,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 	 * Make sure stores to the GIC via the memory mapped interface
 	 * are now visible to the system register interface.
 	 */
-	dsb(st);
+	if (!cpu_if->vgic_sre)
+		dsb(st);
 
 	cpu_if->vgic_vmcr  = read_gicreg(ICH_VMCR_EL2);
 
@@ -190,12 +191,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 			if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
 				continue;
 
-			if (cpu_if->vgic_elrsr & (1 << i)) {
+			if (cpu_if->vgic_elrsr & (1 << i))
 				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
-				continue;
-			}
+			else
+				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
 
-			cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
 			__gic_v3_set_lr(0, i);
 		}
 
@@ -236,8 +236,12 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 
 	val = read_gicreg(ICC_SRE_EL2);
 	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
-	isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
-	write_gicreg(1, ICC_SRE_EL1);
+
+	if (!cpu_if->vgic_sre) {
+		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
+		isb();
+		write_gicreg(1, ICC_SRE_EL1);
+	}
 }
 
 void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
@@ -256,8 +260,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	 * been actually programmed with the value we want before
 	 * starting to mess with the rest of the GIC.
 	 */
-	write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
-	isb();
+	if (!cpu_if->vgic_sre) {
+		write_gicreg(0, ICC_SRE_EL1);
+		isb();
+	}
 
 	val = read_gicreg(ICH_VTR_EL2);
 	max_lr_idx = vtr_to_max_lr_idx(val);
@@ -306,18 +312,18 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	 * (re)distributors. This ensure the guest will read the
 	 * correct values from the memory-mapped interface.
 	 */
-	isb();
-	dsb(sy);
+	if (!cpu_if->vgic_sre) {
+		isb();
+		dsb(sy);
+	}
 	vcpu->arch.vgic_cpu.live_lrs = live_lrs;
 
 	/*
 	 * Prevent the guest from touching the GIC system registers if
 	 * SRE isn't enabled for GICv3 emulation.
 	 */
-	if (!cpu_if->vgic_sre) {
-		write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
-			     ICC_SRE_EL2);
-	}
+	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
+		     ICC_SRE_EL2);
 }
 
 void __hyp_text __vgic_v3_init_lrs(void)

+ 12 - 1
arch/arm64/kvm/sys_regs.c

@@ -134,6 +134,17 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+static bool access_gic_sre(struct kvm_vcpu *vcpu,
+			   struct sys_reg_params *p,
+			   const struct sys_reg_desc *r)
+{
+	if (p->is_write)
+		return ignore_write(vcpu, p);
+
+	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
+	return true;
+}
+
 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 			struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
@@ -958,7 +969,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	  access_gic_sgi },
 	/* ICC_SRE_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
-	  trap_raz_wi },
+	  access_gic_sre },
 
 	/* CONTEXTIDR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),

+ 7 - 1
arch/arm64/mm/dump.c

@@ -150,6 +150,7 @@ static const struct prot_bits pte_bits[] = {
 
 struct pg_level {
 	const struct prot_bits *bits;
+	const char *name;
 	size_t num;
 	u64 mask;
 };
@@ -157,15 +158,19 @@ struct pg_level {
 static struct pg_level pg_level[] = {
 	{
 	}, { /* pgd */
+		.name	= "PGD",
 		.bits	= pte_bits,
 		.num	= ARRAY_SIZE(pte_bits),
 	}, { /* pud */
+		.name	= (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
 		.bits	= pte_bits,
 		.num	= ARRAY_SIZE(pte_bits),
 	}, { /* pmd */
+		.name	= (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
 		.bits	= pte_bits,
 		.num	= ARRAY_SIZE(pte_bits),
 	}, { /* pte */
+		.name	= "PTE",
 		.bits	= pte_bits,
 		.num	= ARRAY_SIZE(pte_bits),
 	},
@@ -214,7 +219,8 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 				delta >>= 10;
 				unit++;
 			}
-			seq_printf(st->seq, "%9lu%c", delta, *unit);
+			seq_printf(st->seq, "%9lu%c %s", delta, *unit,
+				   pg_level[st->level].name);
 			if (pg_level[st->level].bits)
 				dump_prot(st, pg_level[st->level].bits,
 					  pg_level[st->level].num);

+ 14 - 0
arch/arm64/mm/hugetlbpage.c

@@ -306,6 +306,10 @@ static __init int setup_hugepagesz(char *opt)
 		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
 	} else if (ps == PUD_SIZE) {
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+	} else if (ps == (PAGE_SIZE * CONT_PTES)) {
+		hugetlb_add_hstate(CONT_PTE_SHIFT);
+	} else if (ps == (PMD_SIZE * CONT_PMDS)) {
+		hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
 	} else {
 		hugetlb_bad_size();
 		pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
@@ -314,3 +318,13 @@ static __init int setup_hugepagesz(char *opt)
 	return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
+
+#ifdef CONFIG_ARM64_64K_PAGES
+static __init int add_default_hugepagesz(void)
+{
+	if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
+		hugetlb_add_hstate(CONT_PMD_SHIFT);
+	return 0;
+}
+arch_initcall(add_default_hugepagesz);
+#endif
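
As a worked example of the new branches (assuming the usual arm64 definition CONT_PTES = 1 << CONT_SHIFT): with 64K pages, CONT_SHIFT = 5 from the Kconfig hunk earlier gives ps == 32 * 64K = 2M, so "hugepagesz=2M" now selects a contiguous-PTE hstate; with 4K pages (CONT_SHIFT = 4) the same branch accepts 16 * 4K = 64K.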

+ 2 - 0
arch/parisc/include/asm/traps.h

@@ -8,6 +8,8 @@ struct pt_regs;
 void parisc_terminate(char *msg, struct pt_regs *regs,
 		int code, unsigned long offset) __noreturn __cold;
 
+void die_if_kernel(char *str, struct pt_regs *regs, long err);
+
 /* mm/fault.c */
 void do_page_fault(struct pt_regs *regs, unsigned long code,
 		unsigned long address);

+ 3 - 2
arch/parisc/kernel/processor.c

@@ -324,8 +324,9 @@ int init_per_cpu(int cpunum)
 		per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
 		per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
 
-		printk(KERN_INFO  "FP[%d] enabled: Rev %ld Model %ld\n",
-			cpunum, coproc_cfg.revision, coproc_cfg.model);
+		if (cpunum == 0)
+			printk(KERN_INFO  "FP[%d] enabled: Rev %ld Model %ld\n",
+				cpunum, coproc_cfg.revision, coproc_cfg.model);
 
 		/*
 		** store status register to stack (hopefully aligned)

+ 0 - 5
arch/parisc/kernel/time.c

@@ -309,11 +309,6 @@ void __init time_init(void)
 	clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
 				NSEC_PER_MSEC, 0);
 
-#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
-	/* At bootup only one 64bit CPU is online and cr16 is "stable" */
-	set_sched_clock_stable();
-#endif
-
 	start_cpu_itimer();	/* get CPU 0 started */
 
 	/* register at clocksource framework */

+ 10 - 3
arch/parisc/kernel/unaligned.c

@@ -28,6 +28,7 @@
 #include <linux/ratelimit.h>
 #include <asm/uaccess.h>
 #include <asm/hardirq.h>
+#include <asm/traps.h>
 
 /* #define DEBUG_UNALIGNED 1 */
 
@@ -130,8 +131,6 @@
 
 int unaligned_enabled __read_mostly = 1;
 
-void die_if_kernel (char *str, struct pt_regs *regs, long err);
-
 static int emulate_ldh(struct pt_regs *regs, int toreg)
 {
 	unsigned long saddr = regs->ior;
@@ -666,7 +665,7 @@ void handle_unaligned(struct pt_regs *regs)
 		break;
 	}
 
-	if (modify && R1(regs->iir))
+	if (ret == 0 && modify && R1(regs->iir))
 		regs->gr[R1(regs->iir)] = newbase;
 
 
@@ -677,6 +676,14 @@ void handle_unaligned(struct pt_regs *regs)
 
 	if (ret)
 	{
+		/*
+		 * The unaligned handler failed.
+		 * If we were called by __get_user() or __put_user() jump
+		 * to its exception fixup handler instead of crashing.
+		 */
+		if (!user_mode(regs) && fixup_exception(regs))
+			return;
+
 		printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
 		die_if_kernel("Unaligned data reference", regs, 28);
 

+ 14 - 8
arch/parisc/kernel/unwind.c

@@ -75,7 +75,10 @@ find_unwind_entry(unsigned long addr)
 	if (addr >= kernel_unwind_table.start && 
 	    addr <= kernel_unwind_table.end)
 		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
-	else 
+	else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&unwind_lock, flags);
 		list_for_each_entry(table, &unwind_tables, list) {
 			if (addr >= table->start && 
 			    addr <= table->end)
@@ -86,6 +89,8 @@ find_unwind_entry(unsigned long addr)
 				break;
 			}
 		}
+		spin_unlock_irqrestore(&unwind_lock, flags);
+	}
 
 	return e;
 }
@@ -303,18 +308,16 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
 
 			insn = *(unsigned int *)npc;
 
-			if ((insn & 0xffffc000) == 0x37de0000 ||
-			    (insn & 0xffe00000) == 0x6fc00000) {
+			if ((insn & 0xffffc001) == 0x37de0000 ||
+			    (insn & 0xffe00001) == 0x6fc00000) {
 				/* ldo X(sp), sp, or stwm X,D(sp) */
-				frame_size += (insn & 0x1 ? -1 << 13 : 0) | 
-					((insn & 0x3fff) >> 1);
+				frame_size += (insn & 0x3fff) >> 1;
 				dbg("analyzing func @ %lx, insn=%08x @ "
 				    "%lx, frame_size = %ld\n", info->ip,
 				    insn, npc, frame_size);
-			} else if ((insn & 0xffe00008) == 0x73c00008) {
+			} else if ((insn & 0xffe00009) == 0x73c00008) {
 				/* std,ma X,D(sp) */
-				frame_size += (insn & 0x1 ? -1 << 13 : 0) | 
-					(((insn >> 4) & 0x3ff) << 3);
+				frame_size += ((insn >> 4) & 0x3ff) << 3;
 				dbg("analyzing func @ %lx, insn=%08x @ "
 				    "%lx, frame_size = %ld\n", info->ip,
 				    insn, npc, frame_size);
@@ -333,6 +336,9 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
 			}
 		}
 
+		if (frame_size > e->Total_frame_size << 3)
+			frame_size = e->Total_frame_size << 3;
+
 		if (!unwind_special(info, e->region_start, frame_size)) {
 			info->prev_sp = info->sp - frame_size;
 			if (e->Millicode)
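
The mask change above (bit 0 added to the compared bits) restricts the match to non-negative displacements: in the parisc im14 encoding the sign lives in bit 0, which the removed code sign-extended. A standalone C sketch of the old decode (the instruction words are illustrative):

	#include <stdio.h>

	/* old decode: low-sign 14-bit immediate, sign carried in bit 0 */
	static long decode_im14(unsigned int insn)
	{
		return (insn & 0x1 ? -1L << 13 : 0) | ((insn & 0x3fff) >> 1);
	}

	int main(void)
	{
		unsigned int grow   = 0x37de0080;	/* ldo 64(sp), sp: bit 0 clear */
		unsigned int shrink = 0x37de3f81;	/* negative X: bit 0 set */

		/* prints "64 -64"; the new masks only ever match the first form */
		printf("%ld %ld\n", decode_im14(grow), decode_im14(shrink));
		return 0;
	}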

+ 3 - 3
arch/powerpc/include/asm/reg.h

@@ -717,7 +717,7 @@
 #define   MMCR0_FCWAIT	0x00000002UL /* freeze counter in WAIT state */
 #define   MMCR0_FCHV	0x00000001UL /* freeze conditions in hypervisor mode */
 #define SPRN_MMCR1	798
-#define SPRN_MMCR2	769
+#define SPRN_MMCR2	785
 #define SPRN_MMCRA	0x312
 #define   MMCRA_SDSYNC	0x80000000UL /* SDAR synced with SIAR */
 #define   MMCRA_SDAR_DCACHE_MISS 0x40000000UL
@@ -754,13 +754,13 @@
 #define SPRN_PMC6	792
 #define SPRN_PMC7	793
 #define SPRN_PMC8	794
-#define SPRN_SIAR	780
-#define SPRN_SDAR	781
 #define SPRN_SIER	784
 #define   SIER_SIPR		0x2000000	/* Sampled MSR_PR */
 #define   SIER_SIHV		0x1000000	/* Sampled MSR_HV */
 #define   SIER_SIAR_VALID	0x0400000	/* SIAR contents valid */
 #define   SIER_SDAR_VALID	0x0200000	/* SDAR contents valid */
+#define SPRN_SIAR	796
+#define SPRN_SDAR	797
 #define SPRN_TACR	888
 #define SPRN_TCSCR	889
 #define SPRN_CSIGR	890

+ 1 - 0
arch/powerpc/kernel/prom_init.c

@@ -656,6 +656,7 @@ unsigned char ibm_architecture_vec[] = {
 	W(0xffff0000), W(0x003e0000),	/* POWER6 */
 	W(0xffff0000), W(0x003f0000),	/* POWER7 */
 	W(0xffff0000), W(0x004b0000),	/* POWER8E */
+	W(0xffff0000), W(0x004c0000),   /* POWER8NVL */
 	W(0xffff0000), W(0x004d0000),	/* POWER8 */
 	W(0xffffffff), W(0x0f000004),	/* all 2.07-compliant */
 	W(0xffffffff), W(0x0f000003),	/* all 2.06-compliant */

+ 20 - 2
arch/powerpc/mm/hash_utils_64.c

@@ -159,6 +159,19 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
 	},
 };
 
+/*
+ * 'R' and 'C' update notes:
+ *  - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
+ *     create writeable HPTEs without C set, because the hcall H_PROTECT
+ *     that we use in that case will not update C
+ *  - The above is however not a problem, because we also don't do that
+ *     fancy "no flush" variant of eviction and we use H_REMOVE which will
+ *     do the right thing and thus we don't have the race I described earlier
+ *
+ *    - Under bare metal,  we do have the race, so we need R and C set
+ *    - We make sure R is always set and never lost
+ *    - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
+ */
 unsigned long htab_convert_pte_flags(unsigned long pteflags)
 {
 	unsigned long rflags = 0;
@@ -186,9 +199,14 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
 			rflags |= 0x1;
 	}
 	/*
-	 * Always add "C" bit for perf. Memory coherence is always enabled
+	 * We can't allow hardware to update hpte bits. Hence always
+	 * set 'R' bit and set 'C' if it is a write fault
+	 * Memory coherence is always enabled
 	 */
-	rflags |=  HPTE_R_C | HPTE_R_M;
+	rflags |=  HPTE_R_R | HPTE_R_M;
+
+	if (pteflags & _PAGE_DIRTY)
+		rflags |= HPTE_R_C;
 	/*
 	 * Add in WIG bits
 	 */

+ 1 - 4
arch/powerpc/mm/pgtable-book3s64.c

@@ -33,10 +33,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 	changed = !pmd_same(*(pmdp), entry);
 	if (changed) {
 		__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
-		/*
-		 * Since we are not supporting SW TLB systems, we don't
-		 * have any thing similar to flush_tlb_page_nohash()
-		 */
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
 	return changed;
 }

+ 10 - 13
arch/powerpc/mm/pgtable-radix.c

@@ -296,11 +296,6 @@ found:
 void __init radix__early_init_mmu(void)
 {
 	unsigned long lpcr;
-	/*
-	 * setup LPCR UPRT based on mmu_features
-	 */
-	lpcr = mfspr(SPRN_LPCR);
-	mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
 
 #ifdef CONFIG_PPC_64K_PAGES
 	/* PAGE_SIZE mappings */
@@ -343,8 +338,11 @@ void __init radix__early_init_mmu(void)
 	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
 
 	radix_init_page_sizes();
-	if (!firmware_has_feature(FW_FEATURE_LPAR))
+	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+		lpcr = mfspr(SPRN_LPCR);
+		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
 		radix_init_partition_table();
+	}
 
 	radix_init_pgtable();
 }
@@ -353,16 +351,15 @@ void radix__early_init_mmu_secondary(void)
 {
 	unsigned long lpcr;
 	/*
-	 * setup LPCR UPRT based on mmu_features
+	 * update partition table control register and UPRT
 	 */
-	lpcr = mfspr(SPRN_LPCR);
-	mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
-	/*
-	 * update partition table control register, 64 K size.
-	 */
-	if (!firmware_has_feature(FW_FEATURE_LPAR))
+	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+		lpcr = mfspr(SPRN_LPCR);
+		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
+
 		mtspr(SPRN_PTCR,
 		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
+	}
 }
 
 void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,

+ 1 - 1
arch/powerpc/platforms/512x/clock-commonclk.c

@@ -221,7 +221,7 @@ static bool soc_has_mclk_mux0_canin(void)
 /* convenience wrappers around the common clk API */
 static inline struct clk *mpc512x_clk_fixed(const char *name, int rate)
 {
-	return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+	return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
 }
 
 static inline struct clk *mpc512x_clk_factor(

+ 1 - 1
arch/powerpc/platforms/cell/spufs/coredump.c

@@ -172,7 +172,7 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
 	if (rc < 0)
 		goto out;
 
-	skip = roundup(cprm->file->f_pos - total + sz, 4) - cprm->file->f_pos;
+	skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
 	if (!dump_skip(cprm, skip))
 		goto Eio;
 out:

+ 33 - 16
arch/powerpc/platforms/pseries/eeh_pseries.c

@@ -53,7 +53,6 @@ static int ibm_read_slot_reset_state2;
 static int ibm_slot_error_detail;
 static int ibm_get_config_addr_info;
 static int ibm_get_config_addr_info2;
-static int ibm_configure_bridge;
 static int ibm_configure_pe;
 
 /*
@@ -81,7 +80,14 @@ static int pseries_eeh_init(void)
 	ibm_get_config_addr_info2	= rtas_token("ibm,get-config-addr-info2");
 	ibm_get_config_addr_info	= rtas_token("ibm,get-config-addr-info");
 	ibm_configure_pe		= rtas_token("ibm,configure-pe");
-	ibm_configure_bridge		= rtas_token("ibm,configure-bridge");
+
+	/*
+	 * ibm,configure-pe and ibm,configure-bridge have the same semantics,
+	 * however ibm,configure-pe can be faster.  If we can't find
+	 * ibm,configure-pe then fall back to using ibm,configure-bridge.
+	 */
+	if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
+		ibm_configure_pe 	= rtas_token("ibm,configure-bridge");
 
 	/*
 	 * Necessary sanity check. We needn't check "get-config-addr-info"
@@ -93,8 +99,7 @@ static int pseries_eeh_init(void)
 	    (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
 	     ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE)	||
 	    ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE	||
-	    (ibm_configure_pe == RTAS_UNKNOWN_SERVICE		&&
-	     ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) {
+	    ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
 		pr_info("EEH functionality not supported\n");
 		return -EINVAL;
 	}
@@ -615,29 +620,41 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
 {
 	int config_addr;
 	int ret;
+	/* Waiting 0.2s maximum before skipping configuration */
+	int max_wait = 200;
 
 	/* Figure out the PE address */
 	config_addr = pe->config_addr;
 	if (pe->addr)
 		config_addr = pe->addr;
 
-	/* Use new configure-pe function, if supported */
-	if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
+	while (max_wait > 0) {
 		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
 				config_addr, BUID_HI(pe->phb->buid),
 				BUID_LO(pe->phb->buid));
-	} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
-		ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
-				config_addr, BUID_HI(pe->phb->buid),
-				BUID_LO(pe->phb->buid));
-	} else {
-		return -EFAULT;
-	}
 
-	if (ret)
-		pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
-			__func__, pe->phb->global_number, pe->addr, ret);
+		if (!ret)
+			return ret;
+
+		/*
+		 * If RTAS returns a delay value that's above 100ms, cut it
+		 * down to 100ms in case firmware made a mistake.  For more
+		 * on how these delay values work see rtas_busy_delay_time
+		 */
+		if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
+		    ret <= RTAS_EXTENDED_DELAY_MAX)
+			ret = RTAS_EXTENDED_DELAY_MIN+2;
+
+		max_wait -= rtas_busy_delay_time(ret);
+
+		if (max_wait < 0)
+			break;
+
+		rtas_busy_delay(ret);
+	}
 
+	pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+		__func__, pe->phb->global_number, pe->addr, ret);
 	return ret;
 }
 

+ 12 - 10
arch/x86/kvm/cpuid.c

@@ -181,19 +181,22 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 			     struct kvm_cpuid_entry __user *entries)
 {
 	int r, i;
-	struct kvm_cpuid_entry *cpuid_entries;
+	struct kvm_cpuid_entry *cpuid_entries = NULL;
 
 	r = -E2BIG;
 	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
 		goto out;
 	r = -ENOMEM;
-	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
-	if (!cpuid_entries)
-		goto out;
-	r = -EFAULT;
-	if (copy_from_user(cpuid_entries, entries,
-			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
-		goto out_free;
+	if (cpuid->nent) {
+		cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
+					cpuid->nent);
+		if (!cpuid_entries)
+			goto out;
+		r = -EFAULT;
+		if (copy_from_user(cpuid_entries, entries,
+				   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
+			goto out;
+	}
 	for (i = 0; i < cpuid->nent; i++) {
 		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
 		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
@@ -212,9 +215,8 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 	kvm_x86_ops->cpuid_update(vcpu);
 	r = kvm_update_cpuid(vcpu);
 
-out_free:
-	vfree(cpuid_entries);
 out:
+	vfree(cpuid_entries);
 	return r;
 }
 

+ 4 - 4
arch/x86/kvm/mmu.c

@@ -336,12 +336,12 @@ static gfn_t pse36_gfn_delta(u32 gpte)
 #ifdef CONFIG_X86_64
 static void __set_spte(u64 *sptep, u64 spte)
 {
-	*sptep = spte;
+	WRITE_ONCE(*sptep, spte);
 }
 
 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
 {
-	*sptep = spte;
+	WRITE_ONCE(*sptep, spte);
 }
 
 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
@@ -390,7 +390,7 @@ static void __set_spte(u64 *sptep, u64 spte)
 	 */
 	smp_wmb();
 
-	ssptep->spte_low = sspte.spte_low;
+	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
 }
 
 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
@@ -400,7 +400,7 @@ static void __update_clear_spte_fast(u64 *sptep, u64 spte)
 	ssptep = (union split_spte *)sptep;
 	sspte = (union split_spte)spte;
 
-	ssptep->spte_low = sspte.spte_low;
+	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
 
 	/*
 	 * If we map the spte from present to nonpresent, we should clear
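
The conversions above matter because other CPUs walk SPTEs locklessly, and a plain 64-bit store may legally be torn or re-issued by the compiler. A minimal sketch of the idea behind WRITE_ONCE (a simplified stand-in, not the kernel macro):

	#include <stdint.h>

	/* simplified: force a single volatile store of the whole value */
	#define WRITE_ONCE_U64(x, val)	(*(volatile uint64_t *)&(x) = (val))

	static uint64_t spte;

	void set_spte(uint64_t newval)
	{
		/* spte = newval;  plain store: compiler may split or reorder it */
		WRITE_ONCE_U64(spte, newval);	/* single, untorn access */
	}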

+ 11 - 1
arch/x86/kvm/x86.c

@@ -2314,6 +2314,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_AMD64_NB_CFG:
 	case MSR_FAM10H_MMIO_CONF_BASE:
 	case MSR_AMD64_BU_CFG2:
+	case MSR_IA32_PERF_CTL:
 		msr_info->data = 0;
 		break;
 	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
@@ -2972,6 +2973,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 			      | KVM_VCPUEVENT_VALID_SMM))
 		return -EINVAL;
 
+	if (events->exception.injected &&
+	    (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
+		return -EINVAL;
+
 	process_nmi(vcpu);
 	vcpu->arch.exception.pending = events->exception.injected;
 	vcpu->arch.exception.nr = events->exception.nr;
@@ -3036,6 +3041,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 	if (dbgregs->flags)
 		return -EINVAL;
 
+	if (dbgregs->dr6 & ~0xffffffffull)
+		return -EINVAL;
+	if (dbgregs->dr7 & ~0xffffffffull)
+		return -EINVAL;
+
 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
 	kvm_update_dr0123(vcpu);
 	vcpu->arch.dr6 = dbgregs->dr6;
@@ -7815,7 +7825,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 
 	slot = id_to_memslot(slots, id);
 	if (size) {
-		if (WARN_ON(slot->npages))
+		if (slot->npages)
 			return -EEXIST;
 
 		/*

+ 0 - 9
drivers/acpi/acpi_processor.c

@@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
 		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
 
 		pr->pblk = object.processor.pblk_address;
-
-		/*
-		 * We don't care about error returns - we just try to mark
-		 * these reserved so that nobody else is confused into thinking
-		 * that this region might be unused..
-		 *
-		 * (In particular, allocating the IO range for Cardbus)
-		 */
-		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
 	}
 
 	/*

+ 6 - 3
drivers/acpi/acpi_video.c

@@ -754,7 +754,8 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
 }
 
 int acpi_video_get_levels(struct acpi_device *device,
-			  struct acpi_video_device_brightness **dev_br)
+			  struct acpi_video_device_brightness **dev_br,
+			  int *pmax_level)
 {
 	union acpi_object *obj = NULL;
 	int i, max_level = 0, count = 0, level_ac_battery = 0;
@@ -841,6 +842,8 @@ int acpi_video_get_levels(struct acpi_device *device,
 
 	br->count = count;
 	*dev_br = br;
+	if (pmax_level)
+		*pmax_level = max_level;
 
 out:
 	kfree(obj);
@@ -869,7 +872,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
 	struct acpi_video_device_brightness *br = NULL;
 	int result = -EINVAL;
 
-	result = acpi_video_get_levels(device->dev, &br);
+	result = acpi_video_get_levels(device->dev, &br, &max_level);
 	if (result)
 		return result;
 	device->brightness = br;
@@ -1737,7 +1740,7 @@ static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video)
 
 	mutex_lock(&video->device_list_lock);
 	list_for_each_entry(dev, &video->video_device_list, entry) {
-		if (!acpi_video_device_lcd_query_levels(dev, &levels))
+		if (!acpi_video_device_lcd_query_levels(dev->dev->handle, &levels))
 			kfree(levels);
 	}
 	mutex_unlock(&video->device_list_lock);

+ 9 - 14
drivers/acpi/acpica/hwregs.c

@@ -83,27 +83,22 @@ acpi_hw_write_multiple(u32 value,
 static u8
 acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width)
 {
-	u64 address;
-
 	if (!reg->access_width) {
+		if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+			max_bit_width = 32;
+		}
+
 		/*
 		 * Detect old register descriptors where only the bit_width field
-		 * makes senses. The target address is copied to handle possible
-		 * alignment issues.
+		 * makes sense.
 		 */
-		ACPI_MOVE_64_TO_64(&address, &reg->address);
-		if (!reg->bit_offset && reg->bit_width &&
+		if (reg->bit_width < max_bit_width &&
+		    !reg->bit_offset && reg->bit_width &&
 		    ACPI_IS_POWER_OF_TWO(reg->bit_width) &&
-		    ACPI_IS_ALIGNED(reg->bit_width, 8) &&
-		    ACPI_IS_ALIGNED(address, reg->bit_width)) {
+		    ACPI_IS_ALIGNED(reg->bit_width, 8)) {
 			return (reg->bit_width);
-		} else {
-			if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
-				return (32);
-			} else {
-				return (max_bit_width);
-			}
 		}
+		return (max_bit_width);
 	} else {
 		return (1 << (reg->access_width + 2));
 	}
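
In the access_width branch that survives at the end, the returned width is simply 1 << (access_width + 2): encoded values 1, 2, 3 and 4 select 8-, 16-, 32- and 64-bit accesses respectively.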

+ 1 - 1
drivers/acpi/bus.c

@@ -1051,7 +1051,7 @@ static int __init acpi_bus_init(void)
 	 * Maybe EC region is required at bus_scan/acpi_get_devices. So it
 	 * is necessary to enable it as early as possible.
 	 */
-	acpi_boot_ec_enable();
+	acpi_ec_dsdt_probe();
 
 	printk(KERN_INFO PREFIX "Interpreter enabled\n");
 

+ 22 - 7
drivers/acpi/ec.c

@@ -1446,10 +1446,30 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
 	return AE_OK;
 }
 
-int __init acpi_boot_ec_enable(void)
+static const struct acpi_device_id ec_device_ids[] = {
+	{"PNP0C09", 0},
+	{"", 0},
+};
+
+int __init acpi_ec_dsdt_probe(void)
 {
-	if (!boot_ec)
+	acpi_status status;
+
+	if (boot_ec)
 		return 0;
+
+	/*
+	 * Finding EC from DSDT if there is no ECDT EC available. When this
+	 * function is invoked, ACPI tables have been fully loaded, we can
+	 * walk namespace now.
+	 */
+	boot_ec = make_acpi_ec();
+	if (!boot_ec)
+		return -ENOMEM;
+	status = acpi_get_devices(ec_device_ids[0].id,
+				  ec_parse_device, boot_ec, NULL);
+	if (ACPI_FAILURE(status) || !boot_ec->handle)
+		return -ENODEV;
 	if (!ec_install_handlers(boot_ec)) {
 		first_ec = boot_ec;
 		return 0;
@@ -1457,11 +1477,6 @@ int __init acpi_boot_ec_enable(void)
 	return -EFAULT;
 }
 
-static const struct acpi_device_id ec_device_ids[] = {
-	{"PNP0C09", 0},
-	{"", 0},
-};
-
 #if 0
 /*
  * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not

+ 1 - 1
drivers/acpi/internal.h

@@ -181,7 +181,7 @@ typedef int (*acpi_ec_query_func) (void *data);
 
 int acpi_ec_init(void);
 int acpi_ec_ecdt_probe(void);
-int acpi_boot_ec_enable(void);
+int acpi_ec_dsdt_probe(void);
 void acpi_ec_block_transactions(void);
 void acpi_ec_unblock_transactions(void);
 void acpi_ec_unblock_transactions_early(void);

+ 9 - 0
drivers/acpi/processor_throttling.c

@@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
 	if (!pr->flags.throttling)
 		return -ENODEV;
 
+	/*
+	 * We don't care about error returns - we just try to mark
+	 * these reserved so that nobody else is confused into thinking
+	 * that this region might be unused..
+	 *
+	 * (In particular, allocating the IO range for Cardbus)
+	 */
+	request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+
 	pr->throttling.state = 0;
 
 	duty_mask = pr->throttling.state_count - 1;

+ 1 - 0
drivers/clk/Kconfig

@@ -175,6 +175,7 @@ config COMMON_CLK_KEYSTONE
 config COMMON_CLK_NXP
 	def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX)
 	select REGMAP_MMIO if ARCH_LPC32XX
+	select MFD_SYSCON if ARCH_LPC18XX
 	---help---
 	  Support for clock providers on NXP platforms.
 

+ 5 - 5
drivers/clk/microchip/clk-pic32mzda.c

@@ -180,15 +180,15 @@ static int pic32mzda_clk_probe(struct platform_device *pdev)
 
 	/* register fixed rate clocks */
 	clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL,
-						CLK_IS_ROOT, 24000000);
+						0, 24000000);
 	clks[FRCCLK] =  clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL,
-						CLK_IS_ROOT, 8000000);
+						0, 8000000);
 	clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL,
-						CLK_IS_ROOT, 8000000);
+						0, 8000000);
 	clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL,
-						CLK_IS_ROOT, 32000);
+						0, 32000);
 	clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL,
-						CLK_IS_ROOT, 24000000);
+						0, 24000000);
 	/* fixed rate (optional) clock */
 	if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) {
 		pr_info("pic32-clk: dt requests SOSC.\n");

+ 1 - 1
drivers/cpufreq/cpufreq.c

@@ -1832,7 +1832,7 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 					unsigned int target_freq)
 {
-	clamp_val(target_freq, policy->min, policy->max);
+	target_freq = clamp_val(target_freq, policy->min, policy->max);
 
 	return cpufreq_driver->fast_switch(policy, target_freq);
 }
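
The one-line fix works because clamp_val() returns the clamped value rather than modifying its argument. A standalone sketch of the bug pattern (the macro below is a simplified stand-in for the kernel's clamp_val()):

	#define clamp_val(val, lo, hi) \
		((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

	unsigned int demo(unsigned int target, unsigned int min, unsigned int max)
	{
		clamp_val(target, min, max);		/* result discarded: a no-op */
		target = clamp_val(target, min, max);	/* the actual fix */
		return target;
	}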

+ 10 - 4
drivers/cpufreq/intel_pstate.c

@@ -449,7 +449,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 		cpu->acpi_perf_data.states[0].core_frequency =
 					policy->cpuinfo.max_freq / 1000;
 	cpu->valid_pss_table = true;
-	pr_info("_PPC limits will be enforced\n");
+	pr_debug("_PPC limits will be enforced\n");
 
 	return;
 
@@ -1460,6 +1460,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
 	intel_pstate_clear_update_util_hook(policy->cpu);
 
+	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
+		 policy->cpuinfo.max_freq, policy->max);
+
 	cpu = all_cpu_data[0];
 	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
 	    policy->max < policy->cpuinfo.max_freq &&
@@ -1495,13 +1498,13 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 				   limits->max_sysfs_pct);
 	limits->max_perf_pct = max(limits->min_policy_pct,
 				   limits->max_perf_pct);
-	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
 
 	/* Make sure min_perf_pct <= max_perf_pct */
 	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
 
 	limits->min_perf = div_fp(limits->min_perf_pct, 100);
 	limits->max_perf = div_fp(limits->max_perf_pct, 100);
+	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
 
  out:
 	intel_pstate_set_update_util_hook(policy->cpu);
@@ -1558,8 +1561,11 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	/* cpuinfo and default policy values */
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
-	policy->cpuinfo.max_freq =
-		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+	update_turbo_state();
+	policy->cpuinfo.max_freq = limits->turbo_disabled ?
+			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+	policy->cpuinfo.max_freq *= cpu->pstate.scaling;
+
 	intel_pstate_init_acpi_perf_limits(policy);
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 	cpumask_set_cpu(policy->cpu, policy->cpus);

+ 4 - 3
drivers/dma-buf/dma-buf.c

@@ -33,6 +33,7 @@
 #include <linux/seq_file.h>
 #include <linux/poll.h>
 #include <linux/reservation.h>
+#include <linux/mm.h>
 
 #include <uapi/linux/dma-buf.h>
 
@@ -90,7 +91,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 	dmabuf = file->private_data;
 
 	/* check for overflowing the buffer's size */
-	if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+	if (vma->vm_pgoff + vma_pages(vma) >
 	    dmabuf->size >> PAGE_SHIFT)
 		return -EINVAL;
 
@@ -723,11 +724,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
 		return -EINVAL;
 
 	/* check for offset overflow */
-	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
+	if (pgoff + vma_pages(vma) < pgoff)
 		return -EOVERFLOW;
 
 	/* check for overflowing the buffer's size */
-	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+	if (pgoff + vma_pages(vma) >
 	    dmabuf->size >> PAGE_SHIFT)
 		return -EINVAL;
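
vma_pages() is simply shorthand for the open-coded size computation being replaced; a sketch of the equivalence (PAGE_SHIFT value assumed for illustration):

	struct vm_area_struct { unsigned long vm_start, vm_end; };
	#define PAGE_SHIFT 12	/* illustrative */

	static unsigned long vma_pages(struct vm_area_struct *vma)
	{
		return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	}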
 

+ 68 - 4
drivers/dma-buf/reservation.c

@@ -35,6 +35,17 @@
 #include <linux/reservation.h>
 #include <linux/export.h>
 
+/**
+ * DOC: Reservation Object Overview
+ *
+ * The reservation object provides a mechanism to manage shared and
+ * exclusive fences associated with a buffer.  A reservation object
+ * can have attached one exclusive fence (normally associated with
+ * write operations) or N shared fences (read operations).  The RCU
+ * mechanism is used to protect read access to fences from locked
+ * write-side updates.
+ */
+
 DEFINE_WW_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
@@ -43,9 +54,17 @@ EXPORT_SYMBOL(reservation_seqcount_class);
 
 const char reservation_seqcount_string[] = "reservation_seqcount";
 EXPORT_SYMBOL(reservation_seqcount_string);
-/*
- * Reserve space to add a shared fence to a reservation_object,
- * must be called with obj->lock held.
+
+/**
+ * reservation_object_reserve_shared - Reserve space to add a shared
+ * fence to a reservation_object.
+ * @obj: reservation object
+ *
+ * Should be called before reservation_object_add_shared_fence().  Must
+ * be called with obj->lock held.
+ *
+ * RETURNS
+ * Zero for success, or -errno
  */
 int reservation_object_reserve_shared(struct reservation_object *obj)
 {
@@ -180,7 +199,11 @@ done:
 		fence_put(old_fence);
 }
 
-/*
+/**
+ * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
  * Add a fence to a shared slot, obj->lock must be held, and
  * reservation_object_reserve_shared_fence has been called.
  */
@@ -200,6 +223,13 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 }
 EXPORT_SYMBOL(reservation_object_add_shared_fence);
 
+/**
+ * reservation_object_add_excl_fence - Add an exclusive fence.
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
+ * Add a fence to the exclusive slot.  The obj->lock must be held.
+ */
 void reservation_object_add_excl_fence(struct reservation_object *obj,
 				       struct fence *fence)
 {
@@ -233,6 +263,18 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 }
 EXPORT_SYMBOL(reservation_object_add_excl_fence);
 
+/**
+ * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * fences without update side lock held
+ * @obj: the reservation object
+ * @pfence_excl: the returned exclusive fence (or NULL)
+ * @pshared_count: the number of shared fences returned
+ * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * the required size, and must be freed by caller)
+ *
+ * RETURNS
+ * Zero or -errno
+ */
 int reservation_object_get_fences_rcu(struct reservation_object *obj,
 				      struct fence **pfence_excl,
 				      unsigned *pshared_count,
@@ -319,6 +361,18 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
 
+/**
+ * reservation_object_wait_timeout_rcu - Wait on reservation's objects
+ * shared and/or exclusive fences.
+ * @obj: the reservation object
+ * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @intr: if true, do interruptible wait
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * RETURNS
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than zero on success.
+ */
 long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 					 bool wait_all, bool intr,
 					 unsigned long timeout)
@@ -416,6 +470,16 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
 	return ret;
 }
 
+/**
+ * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * fences have been signaled.
+ * @obj: the reservation object
+ * @test_all: if true, test all fences, otherwise only test the exclusive
+ * fence
+ *
+ * RETURNS
+ * true if all fences signaled, else false
+ */
 bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 					  bool test_all)
 {
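
Pieced together from the kernel-doc added above, a hypothetical caller adding a read (shared) fence would look roughly like this: reserve a slot first, with obj->lock held throughout (error handling elided; a sketch, not kernel code):

	void add_read_fence(struct reservation_object *obj, struct fence *fence)
	{
		ww_mutex_lock(&obj->lock, NULL);
		if (!reservation_object_reserve_shared(obj))
			reservation_object_add_shared_fence(obj, fence);
		ww_mutex_unlock(&obj->lock);
	}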

+ 2 - 1
drivers/edac/edac_mc.c

@@ -565,7 +565,8 @@ void edac_mc_reset_delay_period(unsigned long value)
 	list_for_each(item, &mc_devices) {
 		mci = list_entry(item, struct mem_ctl_info, link);

-		edac_mod_work(&mci->work, value);
+		if (mci->op_state == OP_RUNNING_POLL)
+			edac_mod_work(&mci->work, value);
 	}
 	mutex_unlock(&mem_ctls_mutex);
 }

+ 22 - 13
drivers/edac/sb_edac.c

@@ -239,8 +239,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
 	{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
 };

-#define RIR_RNK_TGT(reg)		GET_BITFIELD(reg, 16, 19)
-#define RIR_OFFSET(reg)		GET_BITFIELD(reg,  2, 14)
+#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
+	GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
+
+#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
+	GET_BITFIELD(reg,  2, 15) : GET_BITFIELD(reg,  2, 14))

 /* Device 16, functions 2-7 */

@@ -326,6 +329,7 @@ struct pci_id_descr {
 struct pci_id_table {
 	const struct pci_id_descr	*descr;
 	int				n_devs;
+	enum type			type;
 };

 struct sbridge_dev {
@@ -394,9 +398,14 @@ static const struct pci_id_descr pci_dev_descr_sbridge[] = {
 	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0)		},
 };

-#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
+#define PCI_ID_TABLE_ENTRY(A, T) {	\
+	.descr = A,			\
+	.n_devs = ARRAY_SIZE(A),	\
+	.type = T			\
+}
+
 static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
 	{0,}			/* 0 terminated list. */
 };

@@ -463,7 +472,7 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
 };

 static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
 	{0,}			/* 0 terminated list. */
 };

@@ -536,7 +545,7 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = {
 };

 static const struct pci_id_table pci_dev_descr_haswell_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
 	{0,}			/* 0 terminated list. */
 };

@@ -580,7 +589,7 @@ static const struct pci_id_descr pci_dev_descr_knl[] = {
 };

 static const struct pci_id_table pci_dev_descr_knl_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
 	{0,}
 };

@@ -648,7 +657,7 @@ static const struct pci_id_descr pci_dev_descr_broadwell[] = {
 };

 static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
 	{0,}			/* 0 terminated list. */
 };

@@ -1894,14 +1903,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
 				pci_read_config_dword(pvt->pci_tad[i],
 						      rir_offset[j][k],
 						      &reg);
-				tmp_mb = RIR_OFFSET(reg) << 6;
+				tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;

 				gb = div_u64_rem(tmp_mb, 1024, &mb);
 				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
 					 i, j, k,
 					 gb, (mb*1000)/1024,
 					 ((u64)tmp_mb) << 20L,
-					 (u32)RIR_RNK_TGT(reg),
+					 (u32)RIR_RNK_TGT(pvt->info.type, reg),
 					 reg);
 			}
 		}
@@ -2234,7 +2243,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 	pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
 			      rir_offset[n_rir][idx],
 			      &reg);
-	*rank = RIR_RNK_TGT(reg);
+	*rank = RIR_RNK_TGT(pvt->info.type, reg);

 	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
 		 n_rir,
@@ -3357,12 +3366,12 @@ fail0:
 #define ICPU(model, table) \
 	{ X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }

-/* Order here must match "enum type" */
 static const struct x86_cpu_id sbridge_cpuids[] = {
 	ICPU(0x2d, pci_dev_descr_sbridge_table),	/* SANDY_BRIDGE */
 	ICPU(0x3e, pci_dev_descr_ibridge_table),	/* IVY_BRIDGE */
 	ICPU(0x3f, pci_dev_descr_haswell_table),	/* HASWELL */
 	ICPU(0x4f, pci_dev_descr_broadwell_table),	/* BROADWELL */
+	ICPU(0x56, pci_dev_descr_broadwell_table),	/* BROADWELL-DE */
 	ICPU(0x57, pci_dev_descr_knl_table),		/* KNIGHTS_LANDING */
 	{ }
 };
@@ -3398,7 +3407,7 @@ static int sbridge_probe(const struct x86_cpu_id *id)
 			 mc, mc + 1, num_mc);

 		sbridge_dev->mc = mc++;
-		rc = sbridge_register_mci(sbridge_dev, id - sbridge_cpuids);
+		rc = sbridge_register_mci(sbridge_dev, ptable->type);
 		if (unlikely(rc < 0))
 			goto fail1;
 	}

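To see what the type dispatch buys, here is a small stand-alone sketch; GET_BITFIELD below mirrors the sb_edac.c helper (extract the inclusive bit range [hi:lo]), and the register value is made up for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors sb_edac.c's GET_BITFIELD(): inclusive bits [hi:lo] of v. */
	#define GET_BITFIELD(v, lo, hi) \
		(((uint64_t)(v) >> (lo)) & ((1ULL << ((hi) - (lo) + 1)) - 1))

	int main(void)
	{
		uint32_t reg = 0x00f5a5a4;	/* illustrative RIR register */

		/* Rank target: bits 16-19 before Broadwell, 20-23 on Broadwell. */
		printf("0x%llx\n", (unsigned long long)GET_BITFIELD(reg, 16, 19)); /* 0x5 */
		printf("0x%llx\n", (unsigned long long)GET_BITFIELD(reg, 20, 23)); /* 0xf */

		/* RIR offset: bits 2-14, widened to 2-15 on Haswell/Broadwell. */
		printf("0x%llx\n", (unsigned long long)GET_BITFIELD(reg, 2, 14));  /* 0x969 */
		printf("0x%llx\n", (unsigned long long)GET_BITFIELD(reg, 2, 15));  /* 0x2969 */
		return 0;
	}

Reading the old, narrower fields on Broadwell parts would truncate the offset and pick the wrong rank target, which is what the per-type macros fix.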
+ 3 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -799,6 +799,7 @@ struct amdgpu_ring {
 	unsigned		cond_exe_offs;
 	u64				cond_exe_gpu_addr;
 	volatile u32	*cond_exe_cpu_addr;
+	int                     vmid;
 };

 /*
@@ -936,7 +937,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
 		    unsigned vm_id, uint64_t pd_addr,
 		    uint32_t gds_base, uint32_t gds_size,
 		    uint32_t gws_base, uint32_t gws_size,
-		    uint32_t oa_base, uint32_t oa_size);
+		    uint32_t oa_base, uint32_t oa_size,
+		    bool vmid_switch);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,

+ 12 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c

@@ -696,6 +696,17 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
 	return result;
 }

+static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
+{
+	CGS_FUNC_ADEV;
+	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
+		release_firmware(adev->pm.fw);
+		return 0;
+	}
+	/* cannot release other firmware because they are not created by cgs */
+	return -EINVAL;
+}
+
 static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 					enum cgs_ucode_id type,
 					struct cgs_firmware_info *info)
@@ -1125,6 +1136,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 	amdgpu_cgs_pm_query_clock_limits,
 	amdgpu_cgs_set_camera_voltages,
 	amdgpu_cgs_get_firmware_info,
+	amdgpu_cgs_rel_firmware,
 	amdgpu_cgs_set_powergating_state,
 	amdgpu_cgs_set_clockgating_state,
 	amdgpu_cgs_get_active_displays_info,

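The contract of the new hook, sketched under the assumption that callers reach it through the cgs_ops table registered above (example_smu_teardown is hypothetical):

	/* Illustrative caller of the new rel_firmware op. */
	static void example_smu_teardown(struct cgs_device *cgs_device)
	{
		/* Only SMU microcode (CGS_UCODE_ID_SMU / CGS_UCODE_ID_SMU_SK)
		 * was requested by cgs itself, so only those types are
		 * released and return 0: */
		WARN_ON(amdgpu_cgs_rel_firmware(cgs_device, CGS_UCODE_ID_SMU));

		/* Any other type, e.g. CGS_UCODE_ID_CP_MEC, returns -EINVAL:
		 * that firmware is owned by the IP block that requested it. */
	}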
+ 9 - 3
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -827,8 +827,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
  */
 static void amdgpu_atombios_fini(struct amdgpu_device *adev)
 {
-	if (adev->mode_info.atom_context)
+	if (adev->mode_info.atom_context) {
 		kfree(adev->mode_info.atom_context->scratch);
+		kfree(adev->mode_info.atom_context->iio);
+	}
 	kfree(adev->mode_info.atom_context);
 	adev->mode_info.atom_context = NULL;
 	kfree(adev->mode_info.atom_card_info);
@@ -1325,6 +1327,11 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 		adev->ip_block_status[i].valid = false;
 	}

+	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+		if (adev->ip_blocks[i].funcs->late_fini)
+			adev->ip_blocks[i].funcs->late_fini((void *)adev);
+	}
+
 	return 0;
 }

@@ -1513,8 +1520,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		amdgpu_atombios_has_gpu_virtualization_table(adev);

 	/* Post card if necessary */
-	if (!amdgpu_card_posted(adev) ||
-	    adev->virtualization.supports_sr_iov) {
+	if (!amdgpu_card_posted(adev)) {
 		if (!adev->bios) {
 			dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
 			return -EINVAL;

+ 7 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c

@@ -122,6 +122,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	bool skip_preamble, need_ctx_switch;
 	unsigned patch_offset = ~0;
 	struct amdgpu_vm *vm;
+	int vmid = 0, old_vmid = ring->vmid;
 	struct fence *hwf;
 	uint64_t ctx;

@@ -135,9 +136,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (job) {
 		vm = job->vm;
 		ctx = job->ctx;
+		vmid = job->vm_id;
 	} else {
 		vm = NULL;
 		ctx = 0;
+		vmid = 0;
 	}

 	if (!ring->ready) {
@@ -163,7 +166,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
 				    job->gds_base, job->gds_size,
 				    job->gws_base, job->gws_size,
-				    job->oa_base, job->oa_size);
+				    job->oa_base, job->oa_size,
+				    (ring->current_ctx == ctx) && (old_vmid != vmid));
 		if (r) {
 			amdgpu_ring_undo(ring);
 			return r;
@@ -180,7 +184,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	need_ctx_switch = ring->current_ctx != ctx;
 	for (i = 0; i < num_ibs; ++i) {
 		ib = &ibs[i];
-
 		/* drop preamble IBs if we don't have a context switch */
 		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
 			continue;
@@ -188,6 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
 				    need_ctx_switch);
 		need_ctx_switch = false;
+		ring->vmid = vmid;
 	}

 	if (ring->funcs->emit_hdp_invalidate)
@@ -198,6 +202,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
 		if (job && job->vm_id)
 			amdgpu_vm_reset_id(adev, job->vm_id);
+		ring->vmid = old_vmid;
 		amdgpu_ring_undo(ring);
 		return r;
 	}

+ 17 - 7
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c

@@ -183,13 +183,6 @@ static int amdgpu_pp_sw_fini(void *handle)
 	if (ret)
 		return ret;

-#ifdef CONFIG_DRM_AMD_POWERPLAY
-	if (adev->pp_enabled) {
-		amdgpu_pm_sysfs_fini(adev);
-		amd_powerplay_fini(adev->powerplay.pp_handle);
-	}
-#endif
-
 	return ret;
 }

@@ -223,6 +216,22 @@ static int amdgpu_pp_hw_fini(void *handle)
 	return ret;
 }

+static void amdgpu_pp_late_fini(void *handle)
+{
+#ifdef CONFIG_DRM_AMD_POWERPLAY
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->pp_enabled) {
+		amdgpu_pm_sysfs_fini(adev);
+		amd_powerplay_fini(adev->powerplay.pp_handle);
+	}
+
+	if (adev->powerplay.ip_funcs->late_fini)
+		adev->powerplay.ip_funcs->late_fini(
+			  adev->powerplay.pp_handle);
+#endif
+}
+
 static int amdgpu_pp_suspend(void *handle)
 {
 	int ret = 0;
@@ -311,6 +320,7 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
 	.sw_fini = amdgpu_pp_sw_fini,
 	.hw_init = amdgpu_pp_hw_init,
 	.hw_fini = amdgpu_pp_hw_fini,
+	.late_fini = amdgpu_pp_late_fini,
 	.suspend = amdgpu_pp_suspend,
 	.resume = amdgpu_pp_resume,
 	.is_idle = amdgpu_pp_is_idle,

+ 1 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c

@@ -343,6 +343,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
 	ring->ring = NULL;
 	ring->ring_obj = NULL;

+	amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
 	amdgpu_wb_free(ring->adev, ring->fence_offs);
 	amdgpu_wb_free(ring->adev, ring->rptr_offs);
 	amdgpu_wb_free(ring->adev, ring->wptr_offs);

+ 1 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c

@@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
 		return r;
 	}
 	r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+	memset(sa_manager->cpu_ptr, 0, sa_manager->size);
 	amdgpu_bo_unreserve(sa_manager->bo);
 	return r;
 }

+ 10 - 9
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -253,19 +253,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
 	int r;

-	if (adev->uvd.vcpu_bo == NULL)
-		return 0;
+	kfree(adev->uvd.saved_bo);

 	amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);

-	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
-	if (!r) {
-		amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
-		amdgpu_bo_unpin(adev->uvd.vcpu_bo);
-		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-	}
+	if (adev->uvd.vcpu_bo) {
+		r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
+		if (!r) {
+			amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
+			amdgpu_bo_unpin(adev->uvd.vcpu_bo);
+			amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
+		}

-	amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+	}

 	amdgpu_ring_fini(&adev->uvd.ring);


+ 3 - 3
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -298,7 +298,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
 		    unsigned vm_id, uint64_t pd_addr,
 		    uint32_t gds_base, uint32_t gds_size,
 		    uint32_t gws_base, uint32_t gws_size,
-		    uint32_t oa_base, uint32_t oa_size)
+		    uint32_t oa_base, uint32_t oa_size,
+		    bool vmid_switch)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
@@ -312,8 +313,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
 	int r;

 	if (ring->funcs->emit_pipeline_sync && (
-	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
-		    ring->type == AMDGPU_RING_TYPE_COMPUTE))
+	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || vmid_switch))
 		amdgpu_ring_emit_pipeline_sync(ring);

 	if (ring->funcs->emit_vm_flush &&

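Taken together, the amdgpu_ib.c and amdgpu_vm.c hunks replace the unconditional pipeline sync on compute rings with a targeted one. A condensed sketch of the new decision, using only names from the hunks above:

	/* amdgpu_ib_schedule() remembers the last VMID run on the ring and
	 * flags a switch when the same context returns on a different VMID: */
	bool vmid_switch = (ring->current_ctx == ctx) && (old_vmid != vmid);

	/* amdgpu_vm_flush() then syncs only for a real flush, a GDS switch
	 * or that VMID switch, not for every compute submission: */
	if (ring->funcs->emit_pipeline_sync &&
	    (pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || vmid_switch))
		amdgpu_ring_emit_pipeline_sync(ring);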
Some files were not shown because too many files changed in this diff