
Backmerge the drm-fixes merge from Linus's tree into drm-next.

This merges '5b726e06d6e8309e5c9ef4109a32caf27c71dfc8' into drm-next

Just to resolve some merge conflicts and make Daniel's life easier.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Dave Airlie 9 years ago
parent
commit
20f8e032e6
100 changed files with 572 additions and 370 deletions
  1. + 4 - 6  Documentation/devicetree/bindings/dma/ti-edma.txt
  2. + 4 - 0  Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
  3. + 1 - 1  Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
  4. + 6 - 1  Documentation/devicetree/bindings/mtd/partition.txt
  5. + 0 - 14  Documentation/networking/e100.txt
  6. + 18 - 2  MAINTAINERS
  7. + 1 - 1  Makefile
  8. + 1 - 0  arch/arc/Kconfig
  9. + 1 - 0  arch/arc/boot/dts/axs10x_mb.dtsi
  10. + 2 - 1  arch/arc/boot/dts/nsim_hs.dts
  11. + 2 - 2  arch/arc/include/asm/mach_desc.h
  12. + 2 - 2  arch/arc/include/asm/smp.h
  13. + 0 - 4  arch/arc/include/asm/unwind.h
  14. + 13 - 2  arch/arc/kernel/intc-arcv2.c
  15. + 24 - 9  arch/arc/kernel/irq.c
  16. + 1 - 1  arch/arc/kernel/mcip.c
  17. + 9 - 23  arch/arc/kernel/perf_event.c
  18. + 0 - 1  arch/arc/kernel/setup.c
  19. + 4 - 4  arch/arc/kernel/smp.c
  20. + 35 - 18  arch/arc/kernel/unwind.c
  21. + 3 - 1  arch/arc/mm/init.c
  22. + 2 - 2  arch/arm/boot/dts/am4372.dtsi
  23. + 8 - 0  arch/arm/boot/dts/am43xx-clocks.dtsi
  24. + 1 - 0  arch/arm/boot/dts/at91-sama5d2_xplained.dts
  25. + 5 - 3  arch/arm/boot/dts/berlin2q.dtsi
  26. + 6 - 2  arch/arm/boot/dts/dm816x.dtsi
  27. + 0 - 5  arch/arm/boot/dts/vf610-colibri.dtsi
  28. + 1 - 1  arch/arm/boot/dts/vf610.dtsi
  29. + 4 - 2  arch/arm/boot/dts/vfxxx.dtsi
  30. + 1 - 0  arch/arm/include/asm/arch_gicv3.h
  31. + 4 - 0  arch/arm/include/asm/uaccess.h
  32. + 18 - 15  arch/arm/kernel/process.c
  33. + 3 - 3  arch/arm/kernel/swp_emulate.c
  34. + 23 - 6  arch/arm/lib/uaccess_with_memcpy.c
  35. + 5 - 1  arch/arm/mach-at91/Kconfig
  36. + 6 - 1  arch/arm/mach-at91/pm.c
  37. + 5 - 1  arch/arm/mach-exynos/pmu.c
  38. + 6 - 6  arch/arm/mach-ixp4xx/include/mach/io.h
  39. + 1 - 1  arch/arm/mach-omap2/Kconfig
  40. + 5 - 0  arch/arm/mach-pxa/ezx.c
  41. + 1 - 1  arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
  42. + 1 - 1  arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
  43. + 26 - 12  arch/arm/mm/context.c
  44. + 1 - 1  arch/arm/mm/dma-mapping.c
  45. + 62 - 30  arch/arm/mm/init.c
  46. + 2 - 2  arch/arm/mm/proc-v7.S
  47. + 5 - 0  arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
  48. + 1 - 0  arch/arm64/include/asm/arch_gicv3.h
  49. + 8 - 4  arch/arm64/include/asm/pgtable.h
  50. + 3 - 2  arch/arm64/kernel/vmlinux.lds.S
  51. + 1 - 1  arch/blackfin/kernel/perf_event.c
  52. + 1 - 1  arch/ia64/include/asm/unistd.h
  53. + 1 - 0  arch/ia64/include/uapi/asm/unistd.h
  54. + 1 - 0  arch/ia64/kernel/entry.S
  55. + 2 - 1  arch/microblaze/kernel/dma.c
  56. + 1 - 1  arch/mips/mm/dma-default.c
  57. + 2 - 1  arch/parisc/include/asm/pgtable.h
  58. + 2 - 1  arch/parisc/include/uapi/asm/unistd.h
  59. + 0 - 18  arch/parisc/kernel/pci.c
  60. + 1 - 0  arch/parisc/kernel/syscall_table.S
  61. + 0 - 8  arch/powerpc/boot/dts/sbc8641d.dts
  62. + 12 - 12  arch/powerpc/include/asm/systbl.h
  63. + 0 - 12  arch/powerpc/include/uapi/asm/unistd.h
  64. + 4 - 10  arch/powerpc/kernel/eeh_driver.c
  65. + 6 - 0  arch/powerpc/kvm/book3s_hv.c
  66. + 38 - 26  arch/powerpc/platforms/powernv/opal-irqchip.c
  67. + 1 - 1  arch/powerpc/platforms/powernv/opal.c
  68. + 12 - 5  arch/s390/kernel/dis.c
  69. + 1 - 1  arch/sh/include/uapi/asm/unistd_64.h
  70. + 1 - 1  arch/sh/kernel/perf_event.c
  71. + 1 - 1  arch/sparc/kernel/perf_event.c
  72. + 1 - 1  arch/tile/kernel/perf_event.c
  73. + 1 - 1  arch/um/Makefile
  74. + 6 - 4  arch/um/drivers/net_user.c
  75. + 1 - 1  arch/um/kernel/signal.c
  76. + 1 - 1  arch/x86/kernel/cpu/perf_event.c
  77. + 3 - 2  arch/x86/kernel/cpu/perf_event.h
  78. + 1 - 1  arch/x86/kernel/cpu/perf_event_intel.c
  79. + 1 - 1  arch/x86/kernel/cpu/perf_event_intel_cqm.c
  80. + 3 - 1  arch/x86/kernel/cpu/perf_event_intel_lbr.c
  81. + 1 - 1  arch/x86/kernel/irq_work.c
  82. + 8 - 0  arch/x86/kvm/cpuid.h
  83. + 19 - 6  arch/x86/kvm/mtrr.c
  84. + 2 - 2  arch/x86/kvm/svm.c
  85. + 4 - 3  arch/x86/kvm/vmx.c
  86. + 8 - 4  arch/x86/kvm/x86.c
  87. + 1 - 1  arch/x86/mm/dump_pagetables.c
  88. + 10 - 8  arch/x86/um/signal.c
  89. + 2 - 7  arch/x86/xen/mmu.c
  90. + 10 - 10  arch/x86/xen/suspend.c
  91. + 3 - 3  block/blk-cgroup.c
  92. + 14 - 2  block/blk-core.c
  93. + 1 - 1  crypto/ablkcipher.c
  94. + 1 - 1  crypto/blkcipher.c
  95. + 1 - 1  drivers/acpi/nfit.c
  96. + 12 - 10  drivers/ata/ahci.c
  97. + 5 - 0  drivers/ata/ahci_mvebu.c
  98. + 9 - 0  drivers/ata/libahci.c
  99. + 8 - 0  drivers/ata/libata-eh.c
  100. + 2 - 1  drivers/ata/sata_fsl.c

+ 4 - 6
Documentation/devicetree/bindings/dma/ti-edma.txt

@@ -22,8 +22,7 @@ Required properties:
 Optional properties:
 - ti,hwmods:	Name of the hwmods associated to the eDMA CC
 - ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow
-		these channels will be SW triggered channels. The list must
-		contain 16 bits numbers, see example.
+		these channels will be SW triggered channels. See example.
 - ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by
 		the driver, they are allocated to be used by for example the
 		DSP. See example.
@@ -56,10 +55,9 @@ edma: edma@49000000 {
 	ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>;
 
 	/* Channel 20 and 21 is allocated for memcpy */
-	ti,edma-memcpy-channels = /bits/ 16 <20 21>;
-	/* The following PaRAM slots are reserved: 35-45 and 100-110 */
-	ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>,
-				       /bits/ 16 <100 10>;
+	ti,edma-memcpy-channels = <20 21>;
+	/* The following PaRAM slots are reserved: 35-44 and 100-109 */
+	ti,edma-reserved-slot-ranges = <35 10>, <100 10>;
 };
 
 edma_tptc0: tptc@49800000 {

+ 4 - 0
Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt

@@ -11,6 +11,10 @@ Required properties:
       0 = active high
       1 = active low
 
+Optional properties:
+- little-endian : GPIO registers are used as little endian. If not
+                  present registers are used as big endian by default.
+
 Example:
 
 gpio0: gpio@1100 {

+ 1 - 1
Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt

@@ -12,7 +12,7 @@ Each key is represented as a sub-node of "allwinner,sun4i-a10-lradc-keys":
 Required subnode-properties:
 	- label: Descriptive name of the key.
 	- linux,code: Keycode to emit.
-	- channel: Channel this key is attached to, mut be 0 or 1.
+	- channel: Channel this key is attached to, must be 0 or 1.
 	- voltage: Voltage in µV at lradc input when this key is pressed.
 
 Example:

+ 6 - 1
Documentation/devicetree/bindings/mtd/partition.txt

@@ -6,7 +6,9 @@ used for what purposes, but which don't use an on-flash partition table such
 as RedBoot.
 
 The partition table should be a subnode of the mtd node and should be named
-'partitions'. Partitions are defined in subnodes of the partitions node.
+'partitions'. This node should have the following property:
+- compatible : (required) must be "fixed-partitions"
+Partitions are then defined in subnodes of the partitions node.
 
 For backwards compatibility partitions as direct subnodes of the mtd device are
 supported. This use is discouraged.
@@ -36,6 +38,7 @@ Examples:
 
 flash@0 {
 	partitions {
+		compatible = "fixed-partitions";
 		#address-cells = <1>;
 		#size-cells = <1>;
 
@@ -53,6 +56,7 @@ flash@0 {
 
 flash@1 {
 	partitions {
+		compatible = "fixed-partitions";
 		#address-cells = <1>;
 		#size-cells = <2>;
 
@@ -66,6 +70,7 @@ flash@1 {
 
 flash@2 {
 	partitions {
+		compatible = "fixed-partitions";
 		#address-cells = <2>;
 		#size-cells = <2>;
 

+ 0 - 14
Documentation/networking/e100.txt

@@ -181,17 +181,3 @@ For general information, go to the Intel support website at:
 If an issue is identified with the released source code on the supported
 kernel with a supported adapter, email the specific information related to the
 issue to e1000-devel@lists.sourceforge.net.
-
-
-License
-=======
-
-This software program is released under the terms of a license agreement
-between you ('Licensee') and Intel. Do not use or load this software or any
-associated materials (collectively, the 'Software') until you have carefully
-read the full terms and conditions of the file COPYING located in this software
-package. By loading or using the Software, you agree to the terms of this
-Agreement. If you do not agree with the terms of this Agreement, do not install
-or use the Software.
-
-* Other names and brands may be claimed as the property of others.

+ 18 - 2
MAINTAINERS

@@ -2975,6 +2975,7 @@ F:	kernel/cpuset.c
 CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
 M:	Johannes Weiner <hannes@cmpxchg.org>
 M:	Michal Hocko <mhocko@kernel.org>
+M:	Vladimir Davydov <vdavydov@virtuozzo.com>
 L:	cgroups@vger.kernel.org
 L:	linux-mm@kvack.org
 S:	Maintained
@@ -5586,7 +5587,7 @@ R:	Jesse Brandeburg <jesse.brandeburg@intel.com>
 R:	Shannon Nelson <shannon.nelson@intel.com>
 R:	Carolyn Wyborny <carolyn.wyborny@intel.com>
 R:	Don Skidmore <donald.c.skidmore@intel.com>
-R:	Matthew Vick <matthew.vick@intel.com>
+R:	Bruce Allan <bruce.w.allan@intel.com>
 R:	John Ronciak <john.ronciak@intel.com>
 R:	Mitch Williams <mitch.a.williams@intel.com>
 L:	intel-wired-lan@lists.osuosl.org
@@ -8295,7 +8296,7 @@ F:	include/linux/delayacct.h
 F:	kernel/delayacct.c
 
 PERFORMANCE EVENTS SUBSYSTEM
-M:	Peter Zijlstra <a.p.zijlstra@chello.nl>
+M:	Peter Zijlstra <peterz@infradead.org>
 M:	Ingo Molnar <mingo@redhat.com>
 M:	Arnaldo Carvalho de Melo <acme@kernel.org>
 L:	linux-kernel@vger.kernel.org
@@ -8388,6 +8389,14 @@ L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:	Maintained
 F:	drivers/pinctrl/samsung/
 
+PIN CONTROLLER - SINGLE
+M:	Tony Lindgren <tony@atomide.com>
+M:	Haojian Zhuang <haojian.zhuang@linaro.org>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:	linux-omap@vger.kernel.org
+S:	Maintained
+F:	drivers/pinctrl/pinctrl-single.c
+
 PIN CONTROLLER - ST SPEAR
 M:	Viresh Kumar <vireshk@kernel.org>
 L:	spear-devel@list.st.com
@@ -8954,6 +8963,13 @@ F:	drivers/rpmsg/
 F:	Documentation/rpmsg.txt
 F:	include/linux/rpmsg.h
 
+RENESAS ETHERNET DRIVERS
+R:	Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+L:	netdev@vger.kernel.org
+L:	linux-sh@vger.kernel.org
+F:	drivers/net/ethernet/renesas/
+F:	include/linux/sh_eth.h
+
 RESET CONTROLLER FRAMEWORK
 M:	Philipp Zabel <p.zabel@pengutronix.de>
 S:	Maintained

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*

+ 1 - 0
arch/arc/Kconfig

@@ -445,6 +445,7 @@ config LINUX_LINK_BASE
 	  However some customers have peripherals mapped at this addr, so
 	  Linux needs to be scooted a bit.
 	  If you don't know what the above means, leave this setting alone.
+	  This needs to match memory start address specified in Device Tree
 
 config HIGHMEM
 	bool "High Memory Support"

+ 1 - 0
arch/arc/boot/dts/axs10x_mb.dtsi

@@ -46,6 +46,7 @@
 			snps,pbl = < 32 >;
 			clocks = <&apbclk>;
 			clock-names = "stmmaceth";
+			max-speed = <100>;
 		};
 
 		ehci@0x40000 {

+ 2 - 1
arch/arc/boot/dts/nsim_hs.dts

@@ -17,7 +17,8 @@
 
 	memory {
 		device_type = "memory";
-		reg = <0x0 0x80000000 0x0 0x40000000	/* 1 GB low mem */
+		/* CONFIG_LINUX_LINK_BASE needs to match low mem start */
+		reg = <0x0 0x80000000 0x0 0x20000000	/* 512 MB low mem */
 		       0x1 0x00000000 0x0 0x40000000>;	/* 1 GB highmem */
 	};
 

+ 2 - 2
arch/arc/include/asm/mach_desc.h

@@ -23,7 +23,7 @@
  * @dt_compat:		Array of device tree 'compatible' strings
  * 			(XXX: although only 1st entry is looked at)
  * @init_early:		Very early callback [called from setup_arch()]
- * @init_cpu_smp:	for each CPU as it is coming up (SMP as well as UP)
+ * @init_per_cpu:	for each CPU as it is coming up (SMP as well as UP)
  * 			[(M):init_IRQ(), (o):start_kernel_secondary()]
  * @init_machine:	arch initcall level callback (e.g. populate static
  * 			platform devices or parse Devicetree)
@@ -35,7 +35,7 @@ struct machine_desc {
 	const char		**dt_compat;
 	void			(*init_early)(void);
 #ifdef CONFIG_SMP
-	void			(*init_cpu_smp)(unsigned int);
+	void			(*init_per_cpu)(unsigned int);
 #endif
 	void			(*init_machine)(void);
 	void			(*init_late)(void);

+ 2 - 2
arch/arc/include/asm/smp.h

@@ -48,7 +48,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
  * @init_early_smp:	A SMP specific h/w block can init itself
  * 			Could be common across platforms so not covered by
  * 			mach_desc->init_early()
- * @init_irq_cpu:	Called for each core so SMP h/w block driver can do
+ * @init_per_cpu:	Called for each core so SMP h/w block driver can do
  * 			any needed setup per cpu (e.g. IPI request)
  * @cpu_kick:		For Master to kickstart a cpu (optionally at a PC)
  * @ipi_send:		To send IPI to a @cpu
@@ -57,7 +57,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
 struct plat_smp_ops {
 	const char 	*info;
 	void		(*init_early_smp)(void);
-	void		(*init_irq_cpu)(int cpu);
+	void		(*init_per_cpu)(int cpu);
 	void		(*cpu_kick)(int cpu, unsigned long pc);
 	void		(*ipi_send)(int cpu);
 	void		(*ipi_clear)(int irq);

+ 0 - 4
arch/arc/include/asm/unwind.h

@@ -112,7 +112,6 @@ struct unwind_frame_info {
 
 extern int arc_unwind(struct unwind_frame_info *frame);
 extern void arc_unwind_init(void);
-extern void arc_unwind_setup(void);
 extern void *unwind_add_table(struct module *module, const void *table_start,
 			      unsigned long table_size);
 extern void unwind_remove_table(void *handle, int init_only);
@@ -152,9 +151,6 @@ static inline void arc_unwind_init(void)
 {
 }
 
-static inline void arc_unwind_setup(void)
-{
-}
 #define unwind_add_table(a, b, c)
 #define unwind_remove_table(a, b)
 

+ 13 - 2
arch/arc/kernel/intc-arcv2.c

@@ -106,10 +106,21 @@ static struct irq_chip arcv2_irq_chip = {
 static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
 			 irq_hw_number_t hw)
 {
-	if (irq == TIMER0_IRQ || irq == IPI_IRQ)
+	/*
+	 * core intc IRQs [16, 23]:
+	 * Statically assigned always private-per-core (Timers, WDT, IPI, PCT)
+	 */
+	if (hw < 24) {
+		/*
+		 * A subsequent request_percpu_irq() fails if percpu_devid is
+		 * not set. That in turns sets NOAUTOEN, meaning each core needs
+		 * to call enable_percpu_irq()
+		 */
+		irq_set_percpu_devid(irq);
 		irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq);
-	else
+	} else {
 		irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq);
+	}
 
 	return 0;
 }
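
For orientation, a minimal sketch of the consumer-side flow this mapping enables (the demo_* identifiers are hypothetical, not from this commit): the boot CPU requests the per-cpu IRQ once with genirq, then every core enables its own copy - the same pattern the perf_event.c hunk below adopts.

static irqreturn_t demo_percpu_isr(int irq, void *dev)
{
	/* dev is the per-cpu cookie registered below */
	return IRQ_HANDLED;
}

static void demo_enable_self(void *info)
{
	/* NOAUTOEN (set via irq_set_percpu_devid) => each core enables itself */
	enable_percpu_irq(*(int *)info, IRQ_TYPE_NONE);
}

static int demo_setup(int irq, void __percpu *pcpu_dev)
{
	int rc = request_percpu_irq(irq, demo_percpu_isr, "demo", pcpu_dev);

	if (!rc)
		on_each_cpu(demo_enable_self, &irq, 1);
	return rc;
}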

+ 24 - 9
arch/arc/kernel/irq.c

@@ -29,11 +29,11 @@ void __init init_IRQ(void)
 
 #ifdef CONFIG_SMP
 	/* a SMP H/w block could do IPI IRQ request here */
-	if (plat_smp_ops.init_irq_cpu)
-		plat_smp_ops.init_irq_cpu(smp_processor_id());
+	if (plat_smp_ops.init_per_cpu)
+		plat_smp_ops.init_per_cpu(smp_processor_id());
 
-	if (machine_desc->init_cpu_smp)
-		machine_desc->init_cpu_smp(smp_processor_id());
+	if (machine_desc->init_per_cpu)
+		machine_desc->init_per_cpu(smp_processor_id());
 #endif
 }
 
@@ -51,6 +51,18 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
+/*
+ * API called for requesting percpu interrupts - called by each CPU
+ *  - For boot CPU, actually request the IRQ with genirq core + enables
+ *  - For subsequent callers only enable called locally
+ *
+ * Relies on being called by boot cpu first (i.e. request called ahead) of
+ * any enable as expected by genirq. Hence Suitable only for TIMER, IPI
+ * which are guaranteed to be setup on boot core first.
+ * Late probed peripherals such as perf can't use this as there no guarantee
+ * of being called on boot CPU first.
+ */
+
 void arc_request_percpu_irq(int irq, int cpu,
                             irqreturn_t (*isr)(int irq, void *dev),
                             const char *irq_nm,
@@ -60,14 +72,17 @@ void arc_request_percpu_irq(int irq, int cpu,
 	if (!cpu) {
 		int rc;
 
+#ifdef CONFIG_ISA_ARCOMPACT
 		/*
-		 * These 2 calls are essential to making percpu IRQ APIs work
-		 * Ideally these details could be hidden in irq chip map function
-		 * but the issue is IPIs IRQs being static (non-DT) and platform
-		 * specific, so we can't identify them there.
+		 * A subsequent request_percpu_irq() fails if percpu_devid is
+		 * not set. That in turns sets NOAUTOEN, meaning each core needs
+		 * to call enable_percpu_irq()
+		 *
+		 * For ARCv2, this is done in irq map function since we know
+		 * which irqs are strictly per cpu
 		 */
 		irq_set_percpu_devid(irq);
-		irq_modify_status(irq, IRQ_NOAUTOEN, 0);  /* @irq, @clr, @set */
+#endif
 
 		rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
 		if (rc)

+ 1 - 1
arch/arc/kernel/mcip.c

@@ -132,7 +132,7 @@ static void mcip_probe_n_setup(void)
 struct plat_smp_ops plat_smp_ops = {
 	.info		= smp_cpuinfo_buf,
 	.init_early_smp	= mcip_probe_n_setup,
-	.init_irq_cpu	= mcip_setup_per_cpu,
+	.init_per_cpu	= mcip_setup_per_cpu,
 	.ipi_send	= mcip_ipi_send,
 	.ipi_clear	= mcip_ipi_clear,
 };

+ 9 - 23
arch/arc/kernel/perf_event.c

@@ -428,12 +428,11 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 
 #endif /* CONFIG_ISA_ARCV2 */
 
-void arc_cpu_pmu_irq_init(void)
+static void arc_cpu_pmu_irq_init(void *data)
 {
-	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
+	int irq = *(int *)data;
 
-	arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr,
-			       "ARC perf counters", pmu_cpu);
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 
 	/* Clear all pending interrupt flags */
 	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
@@ -515,7 +514,6 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 
 	if (has_interrupts) {
 		int irq = platform_get_irq(pdev, 0);
-		unsigned long flags;
 
 		if (irq < 0) {
 			pr_err("Cannot get IRQ number for the platform\n");
@@ -524,24 +522,12 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 
 		arc_pmu->irq = irq;
 
-		/*
-		 * arc_cpu_pmu_irq_init() needs to be called on all cores for
-		 * their respective local PMU.
-		 * However we use opencoded on_each_cpu() to ensure it is called
-		 * on core0 first, so that arc_request_percpu_irq() sets up
-		 * AUTOEN etc. Otherwise enable_percpu_irq() fails to enable
-		 * perf IRQ on non master cores.
-		 * see arc_request_percpu_irq()
-		 */
-		preempt_disable();
-		local_irq_save(flags);
-		arc_cpu_pmu_irq_init();
-		local_irq_restore(flags);
-		smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1);
-		preempt_enable();
-
-		/* Clean all pending interrupt flags */
-		write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
+		/* intc map function ensures irq_set_percpu_devid() called */
+		request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
+				   this_cpu_ptr(&arc_pmu_cpu));
+
+		on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
+
 	} else
 		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 

+ 0 - 1
arch/arc/kernel/setup.c

@@ -429,7 +429,6 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 	arc_unwind_init();
-	arc_unwind_setup();
 }
 
 static int __init customize_machine(void)

+ 4 - 4
arch/arc/kernel/smp.c

@@ -132,11 +132,11 @@ void start_kernel_secondary(void)
 	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
 
 	/* Some SMP H/w setup - for each cpu */
-	if (plat_smp_ops.init_irq_cpu)
-		plat_smp_ops.init_irq_cpu(cpu);
+	if (plat_smp_ops.init_per_cpu)
+		plat_smp_ops.init_per_cpu(cpu);
 
-	if (machine_desc->init_cpu_smp)
-		machine_desc->init_cpu_smp(cpu);
+	if (machine_desc->init_per_cpu)
+		machine_desc->init_per_cpu(cpu);
 
 	arc_local_timer_setup();
 

+ 35 - 18
arch/arc/kernel/unwind.c

@@ -170,6 +170,23 @@ static struct unwind_table *find_table(unsigned long pc)
 
 static unsigned long read_pointer(const u8 **pLoc,
 				  const void *end, signed ptrType);
+static void init_unwind_hdr(struct unwind_table *table,
+			    void *(*alloc) (unsigned long));
+
+/*
+ * wrappers for header alloc (vs. calling one vs. other at call site)
+ * to elide section mismatches warnings
+ */
+static void *__init unw_hdr_alloc_early(unsigned long sz)
+{
+	return __alloc_bootmem_nopanic(sz, sizeof(unsigned int),
+				       MAX_DMA_ADDRESS);
+}
+
+static void *unw_hdr_alloc(unsigned long sz)
+{
+	return kmalloc(sz, GFP_KERNEL);
+}
 
 static void init_unwind_table(struct unwind_table *table, const char *name,
 			      const void *core_start, unsigned long core_size,
@@ -209,6 +226,8 @@ void __init arc_unwind_init(void)
 			  __start_unwind, __end_unwind - __start_unwind,
 			  NULL, 0);
 	  /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
+
+	init_unwind_hdr(&root_table, unw_hdr_alloc_early);
 }
 
 static const u32 bad_cie, not_fde;
@@ -241,8 +260,8 @@ static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
 	e2->fde = v;
 }
 
-static void __init setup_unwind_table(struct unwind_table *table,
-				      void *(*alloc) (unsigned long))
+static void init_unwind_hdr(struct unwind_table *table,
+			    void *(*alloc) (unsigned long))
 {
 	const u8 *ptr;
 	unsigned long tableSize = table->size, hdrSize;
@@ -274,13 +293,13 @@ static void __init setup_unwind_table(struct unwind_table *table,
 		const u32 *cie = cie_for_fde(fde, table);
 		signed ptrType;
 
-		if (cie == &not_fde)
+		if (cie == &not_fde)	/* only process FDE here */
 			continue;
 		if (cie == NULL || cie == &bad_cie)
-			return;
+			continue;	/* say FDE->CIE.version != 1 */
 		ptrType = fde_pointer_type(cie);
 		if (ptrType < 0)
-			return;
+			continue;
 
 		ptr = (const u8 *)(fde + 2);
 		if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
@@ -300,9 +319,11 @@ static void __init setup_unwind_table(struct unwind_table *table,
 
 	hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
 	    + 2 * n * sizeof(unsigned long);
+
 	header = alloc(hdrSize);
 	if (!header)
 		return;
+
 	header->version = 1;
 	header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
 	header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
@@ -322,6 +343,10 @@ static void __init setup_unwind_table(struct unwind_table *table,
 
 		if (fde[1] == 0xffffffff)
 			continue;	/* this is a CIE */
+
+		if (*(u8 *)(cie + 2) != 1)
+			continue;	/* FDE->CIE.version not supported */
+
 		ptr = (const u8 *)(fde + 2);
 		header->table[n].start = read_pointer(&ptr,
 						      (const u8 *)(fde + 1) +
@@ -342,18 +367,6 @@ static void __init setup_unwind_table(struct unwind_table *table,
 	table->header = (const void *)header;
 }
 
-static void *__init balloc(unsigned long sz)
-{
-	return __alloc_bootmem_nopanic(sz,
-				       sizeof(unsigned int),
-				       __pa(MAX_DMA_ADDRESS));
-}
-
-void __init arc_unwind_setup(void)
-{
-	setup_unwind_table(&root_table, balloc);
-}
-
 #ifdef CONFIG_MODULES
 
 static struct unwind_table *last_table;
@@ -377,6 +390,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
 			  table_start, table_size,
 			  NULL, 0);
 
+	init_unwind_hdr(table, unw_hdr_alloc);
+
#ifdef UNWIND_DEBUG
 	unw_debug("Table added for [%s] %lx %lx\n",
 		module->name, table->core.pc, table->core.range);
@@ -439,6 +454,7 @@ void unwind_remove_table(void *handle, int init_only)
 	info.init_only = init_only;
 
 	unlink_table(&info); /* XXX: SMP */
+	kfree(table->header);
 	kfree(table);
 }
 
@@ -507,7 +523,8 @@ static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
 
 	if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
 	    || (*cie & (sizeof(*cie) - 1))
-	    || (cie[1] != 0xffffffff))
+	    || (cie[1] != 0xffffffff)
+	    || ( *(u8 *)(cie + 2) != 1))   /* version 1 supported */
 		return NULL;	/* this is not a (valid) CIE */
 	return cie;
 }
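
The shape of this change is an allocator callback: one header builder now serves both the boot path (bootmem only) and module load (kmalloc). A compressed sketch, with demo_* and compute_hdr_size() as assumed stand-ins for the real code:

static void demo_build_hdr(struct unwind_table *table,
			   void *(*alloc)(unsigned long))
{
	void *hdr = alloc(compute_hdr_size(table));	/* hypothetical helper */

	if (!hdr)
		return;		/* lookup falls back to a linear FDE scan */
	/* ... fill the binary-search index, then publish it ... */
	table->header = hdr;
}

/* boot:        demo_build_hdr(&root_table, unw_hdr_alloc_early);
 * module load: demo_build_hdr(table, unw_hdr_alloc);
 */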

+ 3 - 1
arch/arc/mm/init.c

@@ -51,7 +51,9 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 	int in_use = 0;
 
 	if (!low_mem_sz) {
-		BUG_ON(base != low_mem_start);
+		if (base != low_mem_start)
+			panic("CONFIG_LINUX_LINK_BASE != DT memory { }");
+
 		low_mem_sz = size;
 		in_use = 1;
 	} else {

+ 2 - 2
arch/arm/boot/dts/am4372.dtsi

@@ -74,7 +74,7 @@
 		reg = <0x48240200 0x100>;
 		interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-parent = <&gic>;
-		clocks = <&dpll_mpu_m2_ck>;
+		clocks = <&mpu_periphclk>;
 	};
 
 	local_timer: timer@48240600 {
@@ -82,7 +82,7 @@
 		reg = <0x48240600 0x100>;
 		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-parent = <&gic>;
-		clocks = <&dpll_mpu_m2_ck>;
+		clocks = <&mpu_periphclk>;
 	};
 
 	l2-cache-controller@48242000 {

+ 8 - 0
arch/arm/boot/dts/am43xx-clocks.dtsi

@@ -259,6 +259,14 @@
 		ti,invert-autoidle-bit;
 	};
 
+	mpu_periphclk: mpu_periphclk {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&dpll_mpu_m2_ck>;
+		clock-mult = <1>;
+		clock-div = <2>;
+	};
+
 	dpll_ddr_ck: dpll_ddr_ck {
 		#clock-cells = <0>;
 		compatible = "ti,am3-dpll-clock";

+ 1 - 0
arch/arm/boot/dts/at91-sama5d2_xplained.dts

@@ -184,6 +184,7 @@
 							regulator-name = "VDD_SDHC_1V8";
 							regulator-min-microvolt = <1800000>;
 							regulator-max-microvolt = <1800000>;
+							regulator-always-on;
 						};
 					};
 				};

+ 5 - 3
arch/arm/boot/dts/berlin2q.dtsi

@@ -118,7 +118,8 @@
 		sdhci0: sdhci@ab0000 {
 			compatible = "mrvl,pxav3-mmc";
 			reg = <0xab0000 0x200>;
-			clocks = <&chip_clk CLKID_SDIO1XIN>;
+			clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
+			clock-names = "io", "core";
 			interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};
@@ -126,7 +127,8 @@
 		sdhci1: sdhci@ab0800 {
 			compatible = "mrvl,pxav3-mmc";
 			reg = <0xab0800 0x200>;
-			clocks = <&chip_clk CLKID_SDIO1XIN>;
+			clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
+			clock-names = "io", "core";
 			interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};
@@ -135,7 +137,7 @@
 			compatible = "mrvl,pxav3-mmc";
 			reg = <0xab1000 0x200>;
 			interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_NFC>;
+			clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_SDIO>;
 			clock-names = "io", "core";
 			status = "disabled";
 		};

+ 6 - 2
arch/arm/boot/dts/dm816x.dtsi

@@ -218,6 +218,7 @@
 			reg = <0x480c8000 0x2000>;
 			interrupts = <77>;
 			ti,hwmods = "mailbox";
+			#mbox-cells = <1>;
 			ti,mbox-num-users = <4>;
 			ti,mbox-num-fifos = <12>;
 			mbox_dsp: mbox_dsp {
@@ -279,8 +280,11 @@
 			ti,spi-num-cs = <4>;
 			ti,hwmods = "mcspi1";
 			dmas = <&edma 16 &edma 17
-				&edma 18 &edma 19>;
-			dma-names = "tx0", "rx0", "tx1", "rx1";
+				&edma 18 &edma 19
+				&edma 20 &edma 21
+				&edma 22 &edma 23>;
+			dma-names = "tx0", "rx0", "tx1", "rx1",
+				    "tx2", "rx2", "tx3", "rx3";
 		};
 
 		mmc1: mmc@48060000 {

+ 0 - 5
arch/arm/boot/dts/vf610-colibri.dtsi

@@ -18,8 +18,3 @@
 		reg = <0x80000000 0x10000000>;
 	};
 };
-
-&L2 {
-	arm,data-latency = <2 1 2>;
-	arm,tag-latency = <3 2 3>;
-};

+ 1 - 1
arch/arm/boot/dts/vf610.dtsi

@@ -19,7 +19,7 @@
 		reg = <0x40006000 0x1000>;
 		cache-unified;
 		cache-level = <2>;
-		arm,data-latency = <1 1 1>;
+		arm,data-latency = <3 3 3>;
 		arm,tag-latency = <2 2 2>;
 	};
 };

+ 4 - 2
arch/arm/boot/dts/vfxxx.dtsi

@@ -178,8 +178,10 @@
 				compatible = "fsl,vf610-sai";
 				reg = <0x40031000 0x1000>;
 				interrupts = <86 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&clks VF610_CLK_SAI2>;
-				clock-names = "sai";
+				clocks = <&clks VF610_CLK_SAI2>,
+					<&clks VF610_CLK_SAI2_DIV>,
+					<&clks 0>, <&clks 0>;
+				clock-names = "bus", "mclk1", "mclk2", "mclk3";
 				dma-names = "tx", "rx";
 				dmas = <&edma0 0 21>,
 					<&edma0 0 20>;

+ 1 - 0
arch/arm/include/asm/arch_gicv3.h

@@ -21,6 +21,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/io.h>
+#include <asm/barrier.h>
 
 #define __ACCESS_CP15(CRn, Op1, CRm, Op2)	p15, Op1, %0, CRn, CRm, Op2
 #define __ACCESS_CP15_64(Op1, CRm)		p15, Op1, %Q0, %R0, CRm

+ 4 - 0
arch/arm/include/asm/uaccess.h

@@ -510,10 +510,14 @@ __copy_to_user_std(void __user *to, const void *from, unsigned long n);
 static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+#ifndef CONFIG_UACCESS_WITH_MEMCPY
 	unsigned int __ua_flags = uaccess_save_and_enable();
 	n = arm_copy_to_user(to, from, n);
 	uaccess_restore(__ua_flags);
 	return n;
+#else
+	return arm_copy_to_user(to, from, n);
+#endif
 }
 
 extern unsigned long __must_check

+ 18 - 15
arch/arm/kernel/process.c

@@ -95,6 +95,22 @@ void __show_regs(struct pt_regs *regs)
 {
 	unsigned long flags;
 	char buf[64];
+#ifndef CONFIG_CPU_V7M
+	unsigned int domain;
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Get the domain register for the parent context. In user
+	 * mode, we don't save the DACR, so lets use what it should
+	 * be. For other modes, we place it after the pt_regs struct.
+	 */
+	if (user_mode(regs))
+		domain = DACR_UACCESS_ENABLE;
+	else
+		domain = *(unsigned int *)(regs + 1);
+#else
+	domain = get_domain();
+#endif
+#endif
 
 	show_regs_print_info(KERN_DEFAULT);
 
@@ -123,21 +139,8 @@
 
 #ifndef CONFIG_CPU_V7M
 	{
-		unsigned int domain = get_domain();
 		const char *segment;
 
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
-		/*
-		 * Get the domain register for the parent context. In user
-		 * mode, we don't save the DACR, so lets use what it should
-		 * be. For other modes, we place it after the pt_regs struct.
-		 */
-		if (user_mode(regs))
-			domain = DACR_UACCESS_ENABLE;
-		else
-			domain = *(unsigned int *)(regs + 1);
-#endif
-
 		if ((domain & domain_mask(DOMAIN_USER)) ==
 		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
 			segment = "none";
@@ -163,11 +166,11 @@
 		buf[0] = '\0';
 #ifdef CONFIG_CPU_CP15_MMU
 		{
-			unsigned int transbase, dac = get_domain();
+			unsigned int transbase;
 			asm("mrc p15, 0, %0, c2, c0\n\t"
 			    : "=r" (transbase));
 			snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
-			  	transbase, dac);
+				transbase, domain);
 		}
 #endif
 		asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));

+ 3 - 3
arch/arm/kernel/swp_emulate.c

@@ -36,10 +36,10 @@
  */
 #define __user_swpX_asm(data, addr, res, temp, B)		\
 	__asm__ __volatile__(					\
-	"	mov		%2, %1\n"			\
-	"0:	ldrex"B"	%1, [%3]\n"			\
-	"1:	strex"B"	%0, %2, [%3]\n"			\
+	"0:	ldrex"B"	%2, [%3]\n"			\
+	"1:	strex"B"	%0, %1, [%3]\n"			\
 	"	cmp		%0, #0\n"			\
+	"	moveq		%1, %2\n"			\
 	"	movne		%0, %4\n"			\
 	"2:\n"							\
 	"	.section	 .text.fixup,\"ax\"\n"		\

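A C-level restatement of why the operands above were reordered (a sketch only, not kernel code): SWP must hand back the old memory value in the data register, but may overwrite that register only once the store has succeeded, so a faulting or failed exclusive store leaves the caller's value intact for the retry.

static int swp_sketch(unsigned int *addr, unsigned int *data)
{
	/* the builtin models the ldrex/strex retry loop done in asm */
	unsigned int old = __atomic_exchange_n(addr, *data, __ATOMIC_RELAXED);

	*data = old;	/* clobber the input only after success */
	return 0;
}
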
+ 23 - 6
arch/arm/lib/uaccess_with_memcpy.c

@@ -88,6 +88,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 static unsigned long noinline
 __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 {
+	unsigned long ua_flags;
 	int atomic;
 
 	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
@@ -118,7 +119,9 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 		if (tocopy > n)
 			tocopy = n;
 
+		ua_flags = uaccess_save_and_enable();
 		memcpy((void *)to, from, tocopy);
+		uaccess_restore(ua_flags);
 		to += tocopy;
 		from += tocopy;
 		n -= tocopy;
@@ -145,14 +148,21 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 	 * With frame pointer disabled, tail call optimization kicks in
 	 * as well making this test almost invisible.
 	 */
-	if (n < 64)
-		return __copy_to_user_std(to, from, n);
-	return __copy_to_user_memcpy(to, from, n);
+	if (n < 64) {
+		unsigned long ua_flags = uaccess_save_and_enable();
+		n = __copy_to_user_std(to, from, n);
+		uaccess_restore(ua_flags);
+	} else {
+		n = __copy_to_user_memcpy(to, from, n);
+	}
+	return n;
 }
 	
 static unsigned long noinline
 __clear_user_memset(void __user *addr, unsigned long n)
 {
+	unsigned long ua_flags;
+
 	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
 		memset((void *)addr, 0, n);
 		return 0;
@@ -175,7 +185,9 @@ __clear_user_memset(void __user *addr, unsigned long n)
 		if (tocopy > n)
 			tocopy = n;
 
+		ua_flags = uaccess_save_and_enable();
 		memset((void *)addr, 0, tocopy);
+		uaccess_restore(ua_flags);
 		addr += tocopy;
 		n -= tocopy;
 
@@ -193,9 +205,14 @@ out:
 unsigned long arm_clear_user(void __user *addr, unsigned long n)
 {
 	/* See rational for this in __copy_to_user() above. */
-	if (n < 64)
-		return __clear_user_std(addr, n);
-	return __clear_user_memset(addr, n);
+	if (n < 64) {
+		unsigned long ua_flags = uaccess_save_and_enable();
+		n = __clear_user_std(addr, n);
+		uaccess_restore(ua_flags);
+	} else {
+		n = __clear_user_memset(addr, n);
+	}
+	return n;
 }
 
 #if 0
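
Taken together with the uaccess.h hunk earlier in this commit, the invariant is that uaccess_save_and_enable()/uaccess_restore() now bracket only the instructions that genuinely touch user memory. A simplified sketch of the pairing (demo_copy_chunk is illustrative, not a real kernel helper):

static unsigned long demo_copy_chunk(void __user *to, const void *from,
				     unsigned long n)
{
	unsigned long ua_flags = uaccess_save_and_enable();

	/* user-access window open: PAN/domain protection relaxed here only */
	memcpy((void __force *)to, from, n);
	uaccess_restore(ua_flags);
	return 0;
}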

+ 5 - 1
arch/arm/mach-at91/Kconfig

@@ -4,7 +4,6 @@ menuconfig ARCH_AT91
 	select ARCH_REQUIRE_GPIOLIB
 	select COMMON_CLK_AT91
 	select PINCTRL
-	select PINCTRL_AT91
 	select SOC_BUS
 
 if ARCH_AT91
@@ -17,6 +16,7 @@ config SOC_SAMA5D2
 	select HAVE_AT91_USB_CLK
 	select HAVE_AT91_H32MX
 	select HAVE_AT91_GENERATED_CLK
+	select PINCTRL_AT91PIO4
 	help
 	  Select this if ou are using one of Atmel's SAMA5D2 family SoC.
 
@@ -27,6 +27,7 @@ config SOC_SAMA5D3
 	select HAVE_AT91_UTMI
 	select HAVE_AT91_SMD
 	select HAVE_AT91_USB_CLK
+	select PINCTRL_AT91
 	help
 	  Select this if you are using one of Atmel's SAMA5D3 family SoC.
 	  This support covers SAMA5D31, SAMA5D33, SAMA5D34, SAMA5D35, SAMA5D36.
@@ -40,6 +41,7 @@ config SOC_SAMA5D4
 	select HAVE_AT91_SMD
 	select HAVE_AT91_USB_CLK
 	select HAVE_AT91_H32MX
+	select PINCTRL_AT91
 	help
 	  Select this if you are using one of Atmel's SAMA5D4 family SoC.
 
@@ -50,6 +52,7 @@ config SOC_AT91RM9200
 	select CPU_ARM920T
 	select HAVE_AT91_USB_CLK
 	select MIGHT_HAVE_PCI
+	select PINCTRL_AT91
 	select SOC_SAM_V4_V5
 	select SRAM if PM
 	help
@@ -65,6 +68,7 @@ config SOC_AT91SAM9
 	select HAVE_AT91_UTMI
 	select HAVE_FB_ATMEL
 	select MEMORY
+	select PINCTRL_AT91
 	select SOC_SAM_V4_V5
 	select SRAM if PM
 	help

+ 6 - 1
arch/arm/mach-at91/pm.c

@@ -41,8 +41,10 @@
  * implementation should be moved down into the pinctrl driver and get
  * called as part of the generic suspend/resume path.
  */
+#ifdef CONFIG_PINCTRL_AT91
 extern void at91_pinctrl_gpio_suspend(void);
 extern void at91_pinctrl_gpio_resume(void);
+#endif
 
 static struct {
 	unsigned long uhp_udp_mask;
@@ -151,8 +153,9 @@ static void at91_pm_suspend(suspend_state_t state)
 
 static int at91_pm_enter(suspend_state_t state)
 {
+#ifdef CONFIG_PINCTRL_AT91
 	at91_pinctrl_gpio_suspend();
-
+#endif
 	switch (state) {
 	/*
 	 * Suspend-to-RAM is like STANDBY plus slow clock mode, so
@@ -192,7 +195,9 @@ static int at91_pm_enter(suspend_state_t state)
 error:
 	target_state = PM_SUSPEND_ON;
 
+#ifdef CONFIG_PINCTRL_AT91
 	at91_pinctrl_gpio_resume();
+#endif
 	return 0;
 }
 

+ 5 - 1
arch/arm/mach-exynos/pmu.c

@@ -748,8 +748,12 @@ static void exynos5_powerdown_conf(enum sys_powerdown mode)
 void exynos_sys_powerdown_conf(enum sys_powerdown mode)
 {
 	unsigned int i;
+	const struct exynos_pmu_data *pmu_data;
+
+	if (!pmu_context)
+		return;
 
-	const struct exynos_pmu_data *pmu_data = pmu_context->pmu_data;
+	pmu_data = pmu_context->pmu_data;
 
 	if (pmu_data->powerdown_conf)
 		pmu_data->powerdown_conf(mode);

+ 6 - 6
arch/arm/mach-ixp4xx/include/mach/io.h

@@ -143,7 +143,7 @@ static inline void __indirect_writesl(volatile void __iomem *bus_addr,
 		writel(*vaddr++, bus_addr);
 }
 
-static inline unsigned char __indirect_readb(const volatile void __iomem *p)
+static inline u8 __indirect_readb(const volatile void __iomem *p)
 {
 	u32 addr = (u32)p;
 	u32 n, byte_enables, data;
@@ -166,7 +166,7 @@ static inline void __indirect_readsb(const volatile void __iomem *bus_addr,
 		*vaddr++ = readb(bus_addr);
 }
 
-static inline unsigned short __indirect_readw(const volatile void __iomem *p)
+static inline u16 __indirect_readw(const volatile void __iomem *p)
 {
 	u32 addr = (u32)p;
 	u32 n, byte_enables, data;
@@ -189,7 +189,7 @@ static inline void __indirect_readsw(const volatile void __iomem *bus_addr,
 		*vaddr++ = readw(bus_addr);
 }
 
-static inline unsigned long __indirect_readl(const volatile void __iomem *p)
+static inline u32 __indirect_readl(const volatile void __iomem *p)
 {
 	u32 addr = (__force u32)p;
 	u32 data;
@@ -350,7 +350,7 @@ static inline void insl(u32 io_addr, void *p, u32 count)
 					((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))
 
 #define	ioread8(p)			ioread8(p)
-static inline unsigned int ioread8(const void __iomem *addr)
+static inline u8 ioread8(const void __iomem *addr)
 {
 	unsigned long port = (unsigned long __force)addr;
 	if (__is_io_address(port))
@@ -378,7 +378,7 @@ static inline void ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
 }
 
 #define	ioread16(p)			ioread16(p)
-static inline unsigned int ioread16(const void __iomem *addr)
+static inline u16 ioread16(const void __iomem *addr)
 {
 	unsigned long port = (unsigned long __force)addr;
 	if (__is_io_address(port))
@@ -407,7 +407,7 @@ static inline void ioread16_rep(const void __iomem *addr, void *vaddr,
 }
 
 #define	ioread32(p)			ioread32(p)
-static inline unsigned int ioread32(const void __iomem *addr)
+static inline u32 ioread32(const void __iomem *addr)
 {
 	unsigned long port = (unsigned long __force)addr;
 	if (__is_io_address(port))

+ 1 - 1
arch/arm/mach-omap2/Kconfig

@@ -121,6 +121,7 @@ config ARCH_OMAP2PLUS_TYPICAL
 	select NEON if CPU_V7
 	select PM
 	select REGULATOR
+	select REGULATOR_FIXED_VOLTAGE
 	select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
 	select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
 	select VFP
@@ -201,7 +202,6 @@ config MACH_OMAP3_PANDORA
 	depends on ARCH_OMAP3
 	default y
 	select OMAP_PACKAGE_CBB
-	select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
 config MACH_NOKIA_N810
        bool

+ 5 - 0
arch/arm/mach-pxa/ezx.c

@@ -889,6 +889,7 @@ static void __init e680_init(void)
 
 	pxa_set_keypad_info(&e680_keypad_platform_data);
 
+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
 	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
 	platform_add_devices(ARRAY_AND_SIZE(e680_devices));
 }
@@ -956,6 +957,7 @@ static void __init a1200_init(void)
 
 	pxa_set_keypad_info(&a1200_keypad_platform_data);
 
+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
 	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
 	platform_add_devices(ARRAY_AND_SIZE(a1200_devices));
 }
@@ -1148,6 +1150,7 @@ static void __init a910_init(void)
 		platform_device_register(&a910_camera);
 	}
 
+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
 	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
 	platform_add_devices(ARRAY_AND_SIZE(a910_devices));
 }
@@ -1215,6 +1218,7 @@ static void __init e6_init(void)
 
 	pxa_set_keypad_info(&e6_keypad_platform_data);
 
+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
 	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
 	platform_add_devices(ARRAY_AND_SIZE(e6_devices));
 }
@@ -1256,6 +1260,7 @@ static void __init e2_init(void)
 
 	pxa_set_keypad_info(&e2_keypad_platform_data);
 
+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
 	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
 	platform_add_devices(ARRAY_AND_SIZE(e2_devices));
 }

+ 1 - 1
arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c

@@ -20,7 +20,7 @@
 #include <plat/cpu.h>
 #include <plat/cpu-freq-core.h>
 
-static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = {
+static struct cpufreq_frequency_table s3c2440_plls_12[] = {
 	{ .frequency = 75000000,	.driver_data = PLLVAL(0x75, 3, 3),  }, 	/* FVco 600.000000 */
 	{ .frequency = 80000000,	.driver_data = PLLVAL(0x98, 4, 3),  }, 	/* FVco 640.000000 */
 	{ .frequency = 90000000,	.driver_data = PLLVAL(0x70, 2, 3),  }, 	/* FVco 720.000000 */

+ 1 - 1
arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c

@@ -20,7 +20,7 @@
 #include <plat/cpu.h>
 #include <plat/cpu-freq-core.h>
 
-static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = {
+static struct cpufreq_frequency_table s3c2440_plls_169344[] = {
 	{ .frequency = 78019200,	.driver_data = PLLVAL(121, 5, 3), 	}, 	/* FVco 624.153600 */
 	{ .frequency = 84067200,	.driver_data = PLLVAL(131, 5, 3), 	}, 	/* FVco 672.537600 */
 	{ .frequency = 90115200,	.driver_data = PLLVAL(141, 5, 3), 	}, 	/* FVco 720.921600 */

+ 26 - 12
arch/arm/mm/context.c

@@ -165,13 +165,28 @@ static void flush_context(unsigned int cpu)
 		__flush_icache_all();
 }
 
-static int is_reserved_asid(u64 asid)
+static bool check_update_reserved_asid(u64 asid, u64 newasid)
 {
 	int cpu;
-	for_each_possible_cpu(cpu)
-		if (per_cpu(reserved_asids, cpu) == asid)
-			return 1;
-	return 0;
+	bool hit = false;
+
+	/*
+	 * Iterate over the set of reserved ASIDs looking for a match.
+	 * If we find one, then we can update our mm to use newasid
+	 * (i.e. the same ASID in the current generation) but we can't
+	 * exit the loop early, since we need to ensure that all copies
+	 * of the old ASID are updated to reflect the mm. Failure to do
+	 * so could result in us missing the reserved ASID in a future
+	 * generation.
+	 */
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(reserved_asids, cpu) == asid) {
+			hit = true;
+			per_cpu(reserved_asids, cpu) = newasid;
+		}
+	}
+
+	return hit;
 }
 
 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -181,12 +196,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0) {
+		u64 newasid = generation | (asid & ~ASID_MASK);
+
 		/*
 		 * If our current ASID was active during a rollover, we
 		 * can continue to use it and this was just a false alarm.
 		 */
-		if (is_reserved_asid(asid))
-			return generation | (asid & ~ASID_MASK);
+		if (check_update_reserved_asid(asid, newasid))
+			return newasid;
 
 		/*
 		 * We had a valid ASID in a previous life, so try to re-use
@@ -194,7 +211,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 		 */
 		asid &= ~ASID_MASK;
 		if (!__test_and_set_bit(asid, asid_map))
-			goto bump_gen;
+			return newasid;
 	}
 
 	/*
@@ -216,11 +233,8 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 
 	__set_bit(asid, asid_map);
 	cur_idx = asid;
-
-bump_gen:
-	asid |= generation;
 	cpumask_clear(mm_cpumask(mm));
-	return asid;
+	return asid | generation;
 }
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)

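The rewritten rollover path above composes the new ASID once (generation | old bits) and then sweeps every per-CPU reserved slot instead of bailing at the first match. A minimal userspace sketch of that composition and sweep, assuming illustrative ASID_BITS/NUM_CPUS values rather than the kernel's real definitions:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define ASID_BITS  8
#define ASID_MASK  (~0ULL << ASID_BITS)   /* high bits hold the generation */
#define NUM_CPUS   4

static uint64_t reserved_asids[NUM_CPUS];

/* Mirror of check_update_reserved_asid(): visit every per-CPU slot so all
 * stale copies are rewritten, even after the first match. */
static bool check_update_reserved_asid(uint64_t asid, uint64_t newasid)
{
	bool hit = false;

	for (int cpu = 0; cpu < NUM_CPUS; cpu++) {
		if (reserved_asids[cpu] == asid) {
			hit = true;
			reserved_asids[cpu] = newasid;
		}
	}
	return hit;
}

int main(void)
{
	uint64_t generation = 2ULL << ASID_BITS;    /* pretend we rolled over twice */
	uint64_t asid = (1ULL << ASID_BITS) | 0x2a; /* old generation, ASID 0x2a */
	uint64_t newasid = generation | (asid & ~ASID_MASK);

	reserved_asids[1] = asid;                   /* CPU1 still holds the old copy */
	if (check_update_reserved_asid(asid, newasid))
		printf("reused ASID 0x%llx in new generation -> 0x%llx\n",
		       (unsigned long long)(asid & ~ASID_MASK),
		       (unsigned long long)newasid);
	return 0;
}
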
+ 1 - 1
arch/arm/mm/dma-mapping.c

@@ -1521,7 +1521,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		return -ENOMEM;

 	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
-		phys_addr_t phys = sg_phys(s) & PAGE_MASK;
+		phys_addr_t phys = page_to_phys(sg_page(s));
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);

 		if (!is_coherent &&

+ 62 - 30
arch/arm/mm/init.c

@@ -22,6 +22,7 @@
 #include <linux/memblock.h>
 #include <linux/dma-contiguous.h>
 #include <linux/sizes.h>
+#include <linux/stop_machine.h>

 #include <asm/cp15.h>
 #include <asm/mach-types.h>
@@ -627,12 +628,10 @@ static struct section_perm ro_perms[] = {
  * safe to be called with preemption disabled, as under stop_machine().
  */
 static inline void section_update(unsigned long addr, pmdval_t mask,
-				  pmdval_t prot)
+				  pmdval_t prot, struct mm_struct *mm)
 {
-	struct mm_struct *mm;
 	pmd_t *pmd;

-	mm = current->active_mm;
 	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

 #ifdef CONFIG_ARM_LPAE
@@ -656,49 +655,82 @@ static inline bool arch_has_strict_perms(void)
 	return !!(get_cr() & CR_XP);
 }

-#define set_section_perms(perms, field)	{				\
-	size_t i;							\
-	unsigned long addr;						\
-									\
-	if (!arch_has_strict_perms())					\
-		return;							\
-									\
-	for (i = 0; i < ARRAY_SIZE(perms); i++) {			\
-		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
-		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {		\
-			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
-				perms[i].start, perms[i].end,		\
-				SECTION_SIZE);				\
-			continue;					\
-		}							\
-									\
-		for (addr = perms[i].start;				\
-		     addr < perms[i].end;				\
-		     addr += SECTION_SIZE)				\
-			section_update(addr, perms[i].mask,		\
-				       perms[i].field);			\
-	}								\
+void set_section_perms(struct section_perm *perms, int n, bool set,
+			struct mm_struct *mm)
+{
+	size_t i;
+	unsigned long addr;
+
+	if (!arch_has_strict_perms())
+		return;
+
+	for (i = 0; i < n; i++) {
+		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
+		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
+			pr_err("BUG: section %lx-%lx not aligned to %lx\n",
+				perms[i].start, perms[i].end,
+				SECTION_SIZE);
+			continue;
+		}
+
+		for (addr = perms[i].start;
+		     addr < perms[i].end;
+		     addr += SECTION_SIZE)
+			section_update(addr, perms[i].mask,
+				set ? perms[i].prot : perms[i].clear, mm);
+	}
+
 }

-static inline void fix_kernmem_perms(void)
+static void update_sections_early(struct section_perm perms[], int n)
 {
-	set_section_perms(nx_perms, prot);
+	struct task_struct *t, *s;
+
+	read_lock(&tasklist_lock);
+	for_each_process(t) {
+		if (t->flags & PF_KTHREAD)
+			continue;
+		for_each_thread(t, s)
+			set_section_perms(perms, n, true, s->mm);
+	}
+	read_unlock(&tasklist_lock);
+	set_section_perms(perms, n, true, current->active_mm);
+	set_section_perms(perms, n, true, &init_mm);
+}
+
+int __fix_kernmem_perms(void *unused)
+{
+	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
+	return 0;
+}
+
+void fix_kernmem_perms(void)
+{
+	stop_machine(__fix_kernmem_perms, NULL, NULL);
 }

 #ifdef CONFIG_DEBUG_RODATA
+int __mark_rodata_ro(void *unused)
+{
+	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
+	return 0;
+}
+
 void mark_rodata_ro(void)
 {
-	set_section_perms(ro_perms, prot);
+	stop_machine(__mark_rodata_ro, NULL, NULL);
 }

 void set_kernel_text_rw(void)
 {
-	set_section_perms(ro_perms, clear);
+	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
+				current->active_mm);
 }

 void set_kernel_text_ro(void)
 {
-	set_section_perms(ro_perms, prot);
+	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
+				current->active_mm);
 }
 #endif /* CONFIG_DEBUG_RODATA */


+ 2 - 2
arch/arm/mm/proc-v7.S

@@ -95,7 +95,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
 .equ	cpu_v7_suspend_size, 4 * 9
 #ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_v7_do_suspend)
-	stmfd	sp!, {r4 - r10, lr}
+	stmfd	sp!, {r4 - r11, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID
 	mrc	p15, 0, r5, c13, c0, 3	@ User r/o thread ID
 	stmia	r0!, {r4 - r5}
@@ -112,7 +112,7 @@ ENTRY(cpu_v7_do_suspend)
 	mrc	p15, 0, r9, c1, c0, 1	@ Auxiliary control register
 	mrc	p15, 0, r10, c1, c0, 2	@ Co-processor access control
 	stmia	r0, {r5 - r11}
-	ldmfd	sp!, {r4 - r10, pc}
+	ldmfd	sp!, {r4 - r11, pc}
 ENDPROC(cpu_v7_do_suspend)

 ENTRY(cpu_v7_do_resume)

+ 5 - 0
arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi

@@ -269,6 +269,7 @@
 			clock-frequency = <0>;	/* Updated by bootloader */
 			voltage-ranges = <1800 1800 3300 3300>;
 			sdhci,auto-cmd12;
+			little-endian;
 			bus-width = <4>;
 		};

@@ -277,6 +278,7 @@
 			reg = <0x0 0x2300000 0x0 0x10000>;
 			interrupts = <0 36 0x4>; /* Level high type */
 			gpio-controller;
+			little-endian;
 			#gpio-cells = <2>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
@@ -287,6 +289,7 @@
 			reg = <0x0 0x2310000 0x0 0x10000>;
 			interrupts = <0 36 0x4>; /* Level high type */
 			gpio-controller;
+			little-endian;
 			#gpio-cells = <2>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
@@ -297,6 +300,7 @@
 			reg = <0x0 0x2320000 0x0 0x10000>;
 			interrupts = <0 37 0x4>; /* Level high type */
 			gpio-controller;
+			little-endian;
 			#gpio-cells = <2>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
@@ -307,6 +311,7 @@
 			reg = <0x0 0x2330000 0x0 0x10000>;
 			interrupts = <0 37 0x4>; /* Level high type */
 			gpio-controller;
+			little-endian;
 			#gpio-cells = <2>;
 			interrupt-controller;
 			#interrupt-cells = <2>;

+ 1 - 0
arch/arm64/include/asm/arch_gicv3.h

@@ -77,6 +77,7 @@
 #ifndef __ASSEMBLY__

 #include <linux/stringify.h>
+#include <asm/barrier.h>

 /*
  * Low-level accessors

+ 8 - 4
arch/arm64/include/asm/pgtable.h

@@ -276,10 +276,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 * hardware updates of the pte (ptep_set_access_flags safely changes
 	 * valid ptes without going through an invalid entry).
 	 */
-	if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
-	    pte_valid(*ptep)) {
-		BUG_ON(!pte_young(pte));
-		BUG_ON(pte_write(*ptep) && !pte_dirty(pte));
+	if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
+	    pte_valid(*ptep) && pte_valid(pte)) {
+		VM_WARN_ONCE(!pte_young(pte),
+			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
+			     __func__, pte_val(*ptep), pte_val(pte));
+		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
+			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
+			     __func__, pte_val(*ptep), pte_val(pte));
 	}

 	set_pte(ptep, pte);

+ 3 - 2
arch/arm64/kernel/vmlinux.lds.S

@@ -5,6 +5,7 @@
  */

 #include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
@@ -140,7 +141,7 @@ SECTIONS
 		ARM_EXIT_KEEP(EXIT_DATA)
 	}

-	PERCPU_SECTION(64)
+	PERCPU_SECTION(L1_CACHE_BYTES)

 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
@@ -158,7 +159,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	_data = .;
 	_sdata = .;
-	RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 	PECOFF_EDATA_PADDING
 	_edata = .;


+ 1 - 1
arch/blackfin/kernel/perf_event.c

@@ -14,7 +14,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2009 Jaswinder Singh Rajput
  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *
  * ppc:

+ 1 - 1
arch/ia64/include/asm/unistd.h

@@ -11,7 +11,7 @@



-#define NR_syscalls			322 /* length of syscall table */
+#define NR_syscalls			323 /* length of syscall table */

 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about

+ 1 - 0
arch/ia64/include/uapi/asm/unistd.h

@@ -335,5 +335,6 @@
 #define __NR_userfaultfd		1343
 #define __NR_membarrier			1344
 #define __NR_kcmp			1345
+#define __NR_mlock2			1346

 #endif /* _UAPI_ASM_IA64_UNISTD_H */

+ 1 - 0
arch/ia64/kernel/entry.S

@@ -1771,5 +1771,6 @@ sys_call_table:
 	data8 sys_userfaultfd
 	data8 sys_membarrier
 	data8 sys_kcmp				// 1345
+	data8 sys_mlock2

 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls

+ 2 - 1
arch/microblaze/kernel/dma.c

@@ -61,7 +61,8 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 	/* FIXME this part of code is untested */
 	for_each_sg(sgl, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
-		__dma_sync(sg_phys(sg), sg->length, direction);
+		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
+							sg->length, direction);
 	}

 	return nents;

+ 1 - 1
arch/mips/mm/dma-default.c

@@ -145,7 +145,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,

 	gfp = massage_gfp_flags(dev, gfp);

-	if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
+	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
 		page = dma_alloc_from_contiguous(dev,
 					count, get_order(size));
 	if (!page)

+ 2 - 1
arch/parisc/include/asm/pgtable.h

@@ -372,7 +372,8 @@ static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
  */
 #ifdef CONFIG_HUGETLB_PAGE
 #define pte_huge(pte)           (pte_val(pte) & _PAGE_HUGE)
-#define pte_mkhuge(pte)         (__pte(pte_val(pte) | _PAGE_HUGE))
+#define pte_mkhuge(pte)         (__pte(pte_val(pte) | \
+				 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
 #else
 #define pte_huge(pte)           (0)
 #define pte_mkhuge(pte)         (pte)

+ 2 - 1
arch/parisc/include/uapi/asm/unistd.h

@@ -360,8 +360,9 @@
 #define __NR_execveat		(__NR_Linux + 342)
 #define __NR_membarrier		(__NR_Linux + 343)
 #define __NR_userfaultfd	(__NR_Linux + 344)
+#define __NR_mlock2		(__NR_Linux + 345)

-#define __NR_Linux_syscalls	(__NR_userfaultfd + 1)
+#define __NR_Linux_syscalls	(__NR_mlock2 + 1)


 #define __IGNORE_select		/* newselect */

+ 0 - 18
arch/parisc/kernel/pci.c

@@ -171,24 +171,6 @@ void pcibios_set_master(struct pci_dev *dev)
 }


-void __init pcibios_init_bus(struct pci_bus *bus)
-{
-	struct pci_dev *dev = bus->self;
-	unsigned short bridge_ctl;
-
-	/* We deal only with pci controllers and pci-pci bridges. */
-	if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
-		return;
-
-	/* PCI-PCI bridge - set the cache line and default latency
-	   (32) for primary and secondary buses. */
-	pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);
-
-	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
-	bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
-	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
-}
-
 /*
  * pcibios align resources() is called every time generic PCI code
  * wants to generate a new address. The process of looking for

+ 1 - 0
arch/parisc/kernel/syscall_table.S

@@ -440,6 +440,7 @@
 	ENTRY_COMP(execveat)
 	ENTRY_SAME(membarrier)
 	ENTRY_SAME(userfaultfd)
+	ENTRY_SAME(mlock2)		/* 345 */


 .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))

+ 0 - 8
arch/powerpc/boot/dts/sbc8641d.dts

@@ -227,23 +227,15 @@
 				reg = <0x520 0x20>;

 				phy0: ethernet-phy@1f {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 1>;
 					reg = <0x1f>;
 				};
 				phy1: ethernet-phy@0 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 1>;
 					reg = <0>;
 				};
 				phy2: ethernet-phy@1 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 1>;
 					reg = <1>;
 				};
 				phy3: ethernet-phy@2 {
-					interrupt-parent = <&mpic>;
-					interrupts = <10 1>;
 					reg = <2>;
 				};
 				tbi0: tbi-phy@11 {

+ 12 - 12
arch/powerpc/include/asm/systbl.h

@@ -370,16 +370,16 @@ COMPAT_SYS(execveat)
 PPC64ONLY(switch_endian)
 SYSCALL_SPU(userfaultfd)
 SYSCALL_SPU(membarrier)
-SYSCALL(semop)
-SYSCALL(semget)
-COMPAT_SYS(semctl)
-COMPAT_SYS(semtimedop)
-COMPAT_SYS(msgsnd)
-COMPAT_SYS(msgrcv)
-SYSCALL(msgget)
-COMPAT_SYS(msgctl)
-COMPAT_SYS(shmat)
-SYSCALL(shmdt)
-SYSCALL(shmget)
-COMPAT_SYS(shmctl)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
 SYSCALL(mlock2)

+ 0 - 12
arch/powerpc/include/uapi/asm/unistd.h

@@ -388,18 +388,6 @@
 #define __NR_switch_endian	363
 #define __NR_userfaultfd	364
 #define __NR_membarrier		365
-#define __NR_semop		366
-#define __NR_semget		367
-#define __NR_semctl		368
-#define __NR_semtimedop		369
-#define __NR_msgsnd		370
-#define __NR_msgrcv		371
-#define __NR_msgget		372
-#define __NR_msgctl		373
-#define __NR_shmat		374
-#define __NR_shmdt		375
-#define __NR_shmget		376
-#define __NR_shmctl		377
 #define __NR_mlock2		378

 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */

+ 4 - 10
arch/powerpc/kernel/eeh_driver.c

@@ -590,16 +590,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
 	eeh_ops->configure_bridge(pe);
 	eeh_pe_restore_bars(pe);

-	/*
-	 * If it's PHB PE, the frozen state on all available PEs should have
-	 * been cleared by the PHB reset. Otherwise, we unfreeze the PE and its
-	 * child PEs because they might be in frozen state.
-	 */
-	if (!(pe->type & EEH_PE_PHB)) {
-		rc = eeh_clear_pe_frozen_state(pe, false);
-		if (rc)
-			return rc;
-	}
+	/* Clear frozen state */
+	rc = eeh_clear_pe_frozen_state(pe, false);
+	if (rc)
+		return rc;

 	/* Give the system 5 seconds to finish running the user-space
 	 * hotplug shutdown scripts, e.g. ifdown for ethernet.  Yes,

+ 6 - 0
arch/powerpc/kvm/book3s_hv.c

@@ -224,6 +224,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)

 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
 {
+	/*
+	 * Check for illegal transactional state bit combination
+	 * and if we find it, force the TS field to a safe state.
+	 */
+	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
+		msr &= ~MSR_TS_MASK;
 	vcpu->arch.shregs.msr = msr;
 	kvmppc_end_cede(vcpu);
 }

+ 38 - 26
arch/powerpc/platforms/powernv/opal-irqchip.c

@@ -43,11 +43,34 @@ static unsigned int opal_irq_count;
 static unsigned int *opal_irqs;

 static void opal_handle_irq_work(struct irq_work *work);
-static __be64 last_outstanding_events;
+static u64 last_outstanding_events;
 static struct irq_work opal_event_irq_work = {
 	.func = opal_handle_irq_work,
 };

+void opal_handle_events(uint64_t events)
+{
+	int virq, hwirq = 0;
+	u64 mask = opal_event_irqchip.mask;
+
+	if (!in_irq() && (events & mask)) {
+		last_outstanding_events = events;
+		irq_work_queue(&opal_event_irq_work);
+		return;
+	}
+
+	while (events & mask) {
+		hwirq = fls64(events) - 1;
+		if (BIT_ULL(hwirq) & mask) {
+			virq = irq_find_mapping(opal_event_irqchip.domain,
+						hwirq);
+			if (virq)
+				generic_handle_irq(virq);
+		}
+		events &= ~BIT_ULL(hwirq);
+	}
+}
+
 static void opal_event_mask(struct irq_data *d)
 {
 	clear_bit(d->hwirq, &opal_event_irqchip.mask);
@@ -55,9 +78,21 @@ static void opal_event_mask(struct irq_data *d)

 static void opal_event_unmask(struct irq_data *d)
 {
+	__be64 events;
+
 	set_bit(d->hwirq, &opal_event_irqchip.mask);

-	opal_poll_events(&last_outstanding_events);
+	opal_poll_events(&events);
+	last_outstanding_events = be64_to_cpu(events);
+
+	/*
+	 * We can't just handle the events now with opal_handle_events().
+	 * If we did we would deadlock when opal_event_unmask() is called from
+	 * handle_level_irq() with the irq descriptor lock held, because
+	 * calling opal_handle_events() would call generic_handle_irq() and
+	 * then handle_level_irq() which would try to take the descriptor lock
+	 * again. Instead queue the events for later.
+	 */
 	if (last_outstanding_events & opal_event_irqchip.mask)
 		/* Need to retrigger the interrupt */
 		irq_work_queue(&opal_event_irq_work);
@@ -96,29 +131,6 @@ static int opal_event_map(struct irq_domain *d, unsigned int irq,
 	return 0;
 }

-void opal_handle_events(uint64_t events)
-{
-	int virq, hwirq = 0;
-	u64 mask = opal_event_irqchip.mask;
-
-	if (!in_irq() && (events & mask)) {
-		last_outstanding_events = events;
-		irq_work_queue(&opal_event_irq_work);
-		return;
-	}
-
-	while (events & mask) {
-		hwirq = fls64(events) - 1;
-		if (BIT_ULL(hwirq) & mask) {
-			virq = irq_find_mapping(opal_event_irqchip.domain,
-						hwirq);
-			if (virq)
-				generic_handle_irq(virq);
-		}
-		events &= ~BIT_ULL(hwirq);
-	}
-}
-
 static irqreturn_t opal_interrupt(int irq, void *data)
 {
 	__be64 events;
@@ -131,7 +143,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)

 static void opal_handle_irq_work(struct irq_work *work)
 {
-	opal_handle_events(be64_to_cpu(last_outstanding_events));
+	opal_handle_events(last_outstanding_events);
 }

 static int opal_event_match(struct irq_domain *h, struct device_node *node,

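The relocated opal_handle_events() walks the outstanding events from the most significant bit down, and always clears the bit it inspected so masked events cannot stall the loop. A standalone sketch of that walk, with fls64() modelled on __builtin_clzll and the event/mask values invented for the demo:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)	(1ULL << (n))

static int fls64(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
	uint64_t events = BIT_ULL(38) | BIT_ULL(17) | BIT_ULL(3);
	uint64_t mask   = BIT_ULL(38) | BIT_ULL(3);	/* only these are unmasked */

	while (events & mask) {
		int hwirq = fls64(events) - 1;		/* highest pending bit */

		if (BIT_ULL(hwirq) & mask)
			printf("handle hwirq %d\n", hwirq);
		events &= ~BIT_ULL(hwirq);		/* always consume the bit */
	}
	return 0;
}
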
+ 1 - 1
arch/powerpc/platforms/powernv/opal.c

@@ -278,7 +278,7 @@ static void opal_handle_message(void)

 	/* Sanity check */
 	if (type >= OPAL_MSG_TYPE_MAX) {
-		pr_warning("%s: Unknown message type: %u\n", __func__, type);
+		pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
 		return;
 	}
 	opal_message_do_notify(type, (void *)&msg);

+ 12 - 5
arch/s390/kernel/dis.c

@@ -1920,16 +1920,23 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
 			}
 			if (separator)
 				ptr += sprintf(ptr, "%c", separator);
+			/*
+			 * Use four '%' characters below because of the
+			 * following two conversions:
+			 *
+			 *  1) sprintf: %%%%r -> %%r
+			 *  2) printk : %%r   -> %r
+			 */
 			if (operand->flags & OPERAND_GPR)
-				ptr += sprintf(ptr, "%%r%i", value);
+				ptr += sprintf(ptr, "%%%%r%i", value);
 			else if (operand->flags & OPERAND_FPR)
-				ptr += sprintf(ptr, "%%f%i", value);
+				ptr += sprintf(ptr, "%%%%f%i", value);
 			else if (operand->flags & OPERAND_AR)
-				ptr += sprintf(ptr, "%%a%i", value);
+				ptr += sprintf(ptr, "%%%%a%i", value);
 			else if (operand->flags & OPERAND_CR)
-				ptr += sprintf(ptr, "%%c%i", value);
+				ptr += sprintf(ptr, "%%%%c%i", value);
 			else if (operand->flags & OPERAND_VR)
-				ptr += sprintf(ptr, "%%v%i", value);
+				ptr += sprintf(ptr, "%%%%v%i", value);
 			else if (operand->flags & OPERAND_PCREL)
 				ptr += sprintf(ptr, "%lx", (signed int) value
 								      + addr);

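The quadrupled '%' only makes sense once both formatting passes are counted: the disassembler's sprintf() halves it, and the later printk of the buffer halves it again. A tiny userspace demo of the two passes, with printf standing in for printk:

#include <stdio.h>

int main(void)
{
	char buf[32];

	/* stage 1 (sprintf in the disassembler): "%%%%r%i" -> "%%r1" */
	snprintf(buf, sizeof(buf), "%%%%r%i", 1);
	puts(buf);		/* prints %%r1 */

	/* stage 2 (printk): buf is consumed as a format, "%%r1" -> "%r1" */
	printf(buf);		/* deliberate non-literal format for the demo */
	printf("\n");
	return 0;
}
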
+ 1 - 1
arch/sh/include/uapi/asm/unistd_64.h

@@ -278,7 +278,7 @@
 #define __NR_fsetxattr		256
 #define __NR_getxattr		257
 #define __NR_lgetxattr		258
-#define __NR_fgetxattr		269
+#define __NR_fgetxattr		259
 #define __NR_listxattr		260
 #define __NR_llistxattr		261
 #define __NR_flistxattr		262

+ 1 - 1
arch/sh/kernel/perf_event.c

@@ -10,7 +10,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2009 Jaswinder Singh Rajput
  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *
  * ppc:

+ 1 - 1
arch/sparc/kernel/perf_event.c

@@ -9,7 +9,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2009 Jaswinder Singh Rajput
  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  */

 #include <linux/perf_event.h>

+ 1 - 1
arch/tile/kernel/perf_event.c

@@ -21,7 +21,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2009 Jaswinder Singh Rajput
  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
  */

+ 1 - 1
arch/um/Makefile

@@ -131,7 +131,7 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT)
 # The wrappers will select whether using "malloc" or the kernel allocator.
 LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc

-LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt)) -lrt
+LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))

 # Used by link-vmlinux.sh which has special support for um link
 export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)

+ 6 - 4
arch/um/drivers/net_user.c

@@ -249,21 +249,23 @@ void close_addr(unsigned char *addr, unsigned char *netmask, void *arg)

 char *split_if_spec(char *str, ...)
 {
-	char **arg, *end;
+	char **arg, *end, *ret = NULL;
 	va_list ap;

 	va_start(ap, str);
 	while ((arg = va_arg(ap, char **)) != NULL) {
 		if (*str == '\0')
-			return NULL;
+			goto out;
 		end = strchr(str, ',');
 		if (end != str)
 			*arg = str;
 		if (end == NULL)
-			return NULL;
+			goto out;
 		*end++ = '\0';
 		str = end;
 	}
+	ret = str;
+out:
 	va_end(ap);
-	return str;
+	return ret;
 }

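With the single return path, va_end() now runs on the early exits as well as the normal one. A self-contained userspace copy of the fixed function, fed a made-up interface spec as input:

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

/* Same logic as the patched split_if_spec(): fill each char** from the
 * comma-separated spec, and hit va_end() on every path out. */
static char *split_if_spec(char *str, ...)
{
	char **arg, *end, *ret = NULL;
	va_list ap;

	va_start(ap, str);
	while ((arg = va_arg(ap, char **)) != NULL) {
		if (*str == '\0')
			goto out;
		end = strchr(str, ',');
		if (end != str)
			*arg = str;
		if (end == NULL)
			goto out;
		*end++ = '\0';
		str = end;
	}
	ret = str;
out:
	va_end(ap);	/* now reached on the early exits too */
	return ret;
}

int main(void)
{
	char spec[] = "eth0,10.0.0.1,rest";
	char *name = NULL, *addr = NULL;
	char *rest = split_if_spec(spec, &name, &addr, (char **)NULL);

	printf("name=%s addr=%s rest=%s\n", name, addr, rest);
	return 0;
}
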
+ 1 - 1
arch/um/kernel/signal.c

@@ -69,7 +69,7 @@ void do_signal(struct pt_regs *regs)
 	struct ksignal ksig;
 	int handled_sig = 0;

-	while (get_signal(&ksig)) {
+	if (get_signal(&ksig)) {
 		handled_sig = 1;
 		/* Whee!  Actually deliver the signal.  */
 		handle_signal(&ksig, regs);

+ 1 - 1
arch/x86/kernel/cpu/perf_event.c

@@ -5,7 +5,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2009 Jaswinder Singh Rajput
  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
  *

+ 3 - 2
arch/x86/kernel/cpu/perf_event.h

@@ -5,7 +5,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  *  Copyright (C) 2009 Jaswinder Singh Rajput
  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
  *
@@ -387,7 +387,7 @@ struct cpu_hw_events {
 /* Check flags and event code/umask, and set the HSW N/A flag */
 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
 	__EVENT_CONSTRAINT(code, n, 			\
-			  INTEL_ARCH_EVENT_MASK|INTEL_ARCH_EVENT_MASK, \
+			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


@@ -627,6 +627,7 @@ struct x86_perf_task_context {
 	u64 lbr_from[MAX_LBR_ENTRIES];
 	u64 lbr_to[MAX_LBR_ENTRIES];
 	u64 lbr_info[MAX_LBR_ENTRIES];
+	int tos;
 	int lbr_callstack_users;
 	int lbr_stack_state;
 };

+ 1 - 1
arch/x86/kernel/cpu/perf_event_intel.c

@@ -232,7 +232,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
-	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
+	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */

+ 1 - 1
arch/x86/kernel/cpu/perf_event_intel_cqm.c

@@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
 {
 	if (event->attach_state & PERF_ATTACH_TASK)
-		return perf_cgroup_from_task(event->hw.target);
+		return perf_cgroup_from_task(event->hw.target, event->ctx);

 	return event->cgrp;
 }

+ 3 - 1
arch/x86/kernel/cpu/perf_event_intel_lbr.c

@@ -239,7 +239,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 	}

 	mask = x86_pmu.lbr_nr - 1;
-	tos = intel_pmu_lbr_tos();
+	tos = task_ctx->tos;
 	for (i = 0; i < tos; i++) {
 		lbr_idx = (tos - i) & mask;
 		wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
@@ -247,6 +247,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
+	wrmsrl(x86_pmu.lbr_tos, tos);
 	task_ctx->lbr_stack_state = LBR_NONE;
 }

@@ -270,6 +271,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
+	task_ctx->tos = tos;
 	task_ctx->lbr_stack_state = LBR_VALID;
 }


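Saving and restoring the top-of-stack index matters because the restore loop derives every slot from it: lbr_idx = (tos - i) & mask only lands on the right entries if tos is the value captured at save time, and the final wrmsrl puts the hardware pointer back as well. A userspace sketch of that ring arithmetic, with an invented 8-entry ring:

#include <stdio.h>

#define NR 8	/* power-of-two ring size, illustrative only */

int main(void)
{
	unsigned int mask = NR - 1;
	unsigned int tos = 5;	/* top-of-stack index captured at save time */

	/* walk entries newest-to-oldest, same arithmetic as the LBR restore */
	for (unsigned int i = 0; i < tos; i++) {
		unsigned int idx = (tos - i) & mask;
		printf("restore entry %u into slot %u\n", i, idx);
	}
	return 0;
}
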
+ 1 - 1
arch/x86/kernel/irq_work.c

@@ -1,7 +1,7 @@
 /*
  * x86 specific code for irq_work
  *
- * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
  */

 #include <linux/kernel.h>

+ 8 - 0
arch/x86/kvm/cpuid.h

@@ -38,6 +38,14 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
 	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
 }

+static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 1, 0);
+	return best && (best->edx & bit(X86_FEATURE_MTRR));
+}
+
 static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;

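guest_cpuid_has_mtrr() follows the usual pattern of testing a feature bit in the guest's CPUID leaf 1; MTRR support is CPUID.01H:EDX bit 12. A host-side userspace check of the same bit, using GCC/Clang's <cpuid.h> (x86 only):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;
	/* X86_FEATURE_MTRR is CPUID.01H:EDX bit 12 */
	printf("MTRR %ssupported\n", (edx & (1u << 12)) ? "" : "not ");
	return 0;
}
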
+ 19 - 6
arch/x86/kvm/mtrr.c

@@ -120,14 +120,22 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
 	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
 }

-static u8 mtrr_disabled_type(void)
+static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * Intel SDM 11.11.2.2: all MTRRs are disabled when
 	 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
 	 * memory type is applied to all of physical memory.
+	 *
+	 * However, virtual machines can be run with CPUID such that
+	 * there are no MTRRs.  In that case, the firmware will never
+	 * enable MTRRs and it is obviously undesirable to run the
+	 * guest entirely with UC memory and we use WB.
 	 */
-	return MTRR_TYPE_UNCACHABLE;
+	if (guest_cpuid_has_mtrr(vcpu))
+		return MTRR_TYPE_UNCACHABLE;
+	else
+		return MTRR_TYPE_WRBACK;
 }

 /*
@@ -267,7 +275,7 @@ static int fixed_mtrr_addr_to_seg(u64 addr)

 	for (seg = 0; seg < seg_num; seg++) {
 		mtrr_seg = &fixed_seg_table[seg];
-		if (mtrr_seg->start >= addr && addr < mtrr_seg->end)
+		if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
 			return seg;
 	}

@@ -300,7 +308,6 @@ static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
 	*start = range->base & PAGE_MASK;

 	mask = range->mask & PAGE_MASK;
-	mask |= ~0ULL << boot_cpu_data.x86_phys_bits;

 	/* This cannot overflow because writing to the reserved bits of
 	 * variable MTRRs causes a #GP.
@@ -356,10 +363,14 @@ static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 	if (var_mtrr_range_is_valid(cur))
 		list_del(&mtrr_state->var_ranges[index].node);

+	/* Extend the mask with all 1 bits to the left, since those
+	 * bits must implicitly be 0.  The bits are then cleared
+	 * when reading them.
+	 */
 	if (!is_mtrr_mask)
 		cur->base = data;
 	else
-		cur->mask = data;
+		cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));

 	/* add it to the list if it's enabled. */
 	if (var_mtrr_range_is_valid(cur)) {
@@ -426,6 +437,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
 		else
 			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
+
+		*pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
 	}

 	return 0;
@@ -670,7 +683,7 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 	}

 	if (iter.mtrr_disabled)
-		return mtrr_disabled_type();
+		return mtrr_disabled_type(vcpu);

 	/* not contained in any MTRRs. */
 	if (type == -1)

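The mask write sign-extends everything above the guest's MAXPHYADDR to ones, and the read strips those bits again so the guest sees exactly what it wrote. A userspace sketch of the round trip, assuming an example MAXPHYADDR of 36 (the patch spells the extension as -1LL <<; the demo uses the unsigned equivalent):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int maxphyaddr = 36;		/* example value */
	uint64_t wrmsr_val = 0xff0000800ULL;	/* illustrative mask write */

	/* on write: bits [63:maxphyaddr] become all ones */
	uint64_t stored = wrmsr_val | (~0ULL << maxphyaddr);

	/* on read: clear them so the guest sees what it wrote */
	uint64_t rdmsr_val = stored & ((1ULL << maxphyaddr) - 1);

	printf("stored=0x%016llx read=0x%016llx\n",
	       (unsigned long long)stored, (unsigned long long)rdmsr_val);
	return 0;
}
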
+ 2 - 2
arch/x86/kvm/svm.c

@@ -3422,6 +3422,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 	struct kvm_run *kvm_run = vcpu->run;
 	u32 exit_code = svm->vmcb->control.exit_code;

+	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
+
 	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
 		vcpu->arch.cr0 = svm->vmcb->save.cr0;
 	if (npt_enabled)
@@ -3892,8 +3894,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
 	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

-	trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);
-
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_before_handle_nmi(&svm->vcpu);


+ 4 - 3
arch/x86/kvm/vmx.c

@@ -2803,7 +2803,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = vcpu->arch.ia32_xss;
 		break;
 	case MSR_TSC_AUX:
-		if (!guest_cpuid_has_rdtscp(vcpu))
+		if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
 			return 1;
 		/* Otherwise falls through */
 	default:
@@ -2909,7 +2909,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
 		break;
 	case MSR_TSC_AUX:
-		if (!guest_cpuid_has_rdtscp(vcpu))
+		if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
 			return 1;
 		/* Check reserved bit, higher 32 bits should be zero */
 		if ((data >> 32) != 0)
@@ -8042,6 +8042,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	u32 exit_reason = vmx->exit_reason;
 	u32 vectoring_info = vmx->idt_vectoring_info;

+	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
+
 	/*
 	 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
 	 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
@@ -8668,7 +8670,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx->loaded_vmcs->launched = 1;

 	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
-	trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);

 	/*
 	 * the KVM_REQ_EVENT optimization bit is only on for one entry, and if

+ 8 - 4
arch/x86/kvm/x86.c

@@ -3572,9 +3572,11 @@ static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)

 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
 {
+	int i;
 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
 	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
-	kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
+	for (i = 0; i < 3; i++)
+		kvm_pit_load_count(kvm, i, ps->channels[i].count, 0);
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
 	return 0;
 }
@@ -3593,6 +3595,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 {
 	int start = 0;
+	int i;
 	u32 prev_legacy, cur_legacy;
 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
 	prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
@@ -3602,7 +3605,8 @@ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 	memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
 	       sizeof(kvm->arch.vpit->pit_state.channels));
 	kvm->arch.vpit->pit_state.flags = ps->flags;
-	kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
+	for (i = 0; i < 3; i++)
+		kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, start);
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
 	return 0;
 }
@@ -6515,6 +6519,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (req_immediate_exit)
 		smp_send_reschedule(vcpu->cpu);

+	trace_kvm_entry(vcpu->vcpu_id);
+	wait_lapic_expire(vcpu);
 	__kvm_guest_enter();

 	if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -6527,8 +6533,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
 	}

-	trace_kvm_entry(vcpu->vcpu_id);
-	wait_lapic_expire(vcpu);
 	kvm_x86_ops->run(vcpu);

 	/*

+ 1 - 1
arch/x86/mm/dump_pagetables.c

@@ -89,7 +89,7 @@ static struct addr_marker address_markers[] = {
 	{ 0/* VMALLOC_START */, "vmalloc() Area" },
 	{ 0/* VMALLOC_START */, "vmalloc() Area" },
 	{ 0/*VMALLOC_END*/,     "vmalloc() End" },
 	{ 0/*VMALLOC_END*/,     "vmalloc() End" },
 # ifdef CONFIG_HIGHMEM
 # ifdef CONFIG_HIGHMEM
-	{ 0/*PKMAP_BASE*/,      "Persisent kmap() Area" },
+	{ 0/*PKMAP_BASE*/,      "Persistent kmap() Area" },
 # endif
 # endif
 	{ 0/*FIXADDR_START*/,   "Fixmap Area" },
 	{ 0/*FIXADDR_START*/,   "Fixmap Area" },
 #endif
 #endif

+ 10 - 8
arch/x86/um/signal.c

@@ -211,7 +211,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
 		if (err)
 			return 1;

-		err = convert_fxsr_from_user(&fpx, sc.fpstate);
+		err = convert_fxsr_from_user(&fpx, (void *)sc.fpstate);
 		if (err)
 			return 1;

@@ -227,7 +227,7 @@
 	{
 		struct user_i387_struct fp;

-		err = copy_from_user(&fp, sc.fpstate,
+		err = copy_from_user(&fp, (void *)sc.fpstate,
 				     sizeof(struct user_i387_struct));
 		if (err)
 			return 1;
@@ -291,7 +291,7 @@ static int copy_sc_to_user(struct sigcontext __user *to,
 #endif
 #undef PUTREG
 	sc.oldmask = mask;
-	sc.fpstate = to_fp;
+	sc.fpstate = (unsigned long)to_fp;

 	err = copy_to_user(to, &sc, sizeof(struct sigcontext));
 	if (err)
@@ -468,12 +468,10 @@ long sys_sigreturn(void)
 	struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
 	sigset_t set;
 	struct sigcontext __user *sc = &frame->sc;
-	unsigned long __user *oldmask = &sc->oldmask;
-	unsigned long __user *extramask = frame->extramask;
 	int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);

-	if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) ||
-	    copy_from_user(&set.sig[1], extramask, sig_size))
+	if (copy_from_user(&set.sig[0], &sc->oldmask, sizeof(set.sig[0])) ||
+	    copy_from_user(&set.sig[1], frame->extramask, sig_size))
 		goto segfault;

 	set_current_blocked(&set);
@@ -505,6 +503,7 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
 {
 	struct rt_sigframe __user *frame;
 	int err = 0, sig = ksig->sig;
+	unsigned long fp_to;

 	frame = (struct rt_sigframe __user *)
 		round_down(stack_top - sizeof(struct rt_sigframe), 16);
@@ -526,7 +525,10 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
 	err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs));
 	err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
 			       set->sig[0]);
-	err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
+
+	fp_to = (unsigned long)&frame->fpstate;
+
+	err |= __put_user(fp_to, &frame->uc.uc_mcontext.fpstate);
 	if (sizeof(*set) == 16) {
 		err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
 		err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);

+ 2 - 7
arch/x86/xen/mmu.c

@@ -2495,14 +2495,9 @@ void __init xen_init_mmu_ops(void)
 {
 	x86_init.paging.pagetable_init = xen_pagetable_init;

-	/* Optimization - we can use the HVM one but it has no idea which
-	 * VCPUs are descheduled - which means that it will needlessly IPI
-	 * them. Xen knows so let it do the job.
-	 */
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-		pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
+	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return;
-	}
+
 	pv_mmu_ops = xen_mmu_ops;

 	memset(dummy_mapping, 0xff, PAGE_SIZE);

+ 10 - 10
arch/x86/xen/suspend.c

@@ -68,26 +68,16 @@ static void xen_pv_post_suspend(int suspend_cancelled)

 void xen_arch_pre_suspend(void)
 {
-	int cpu;
-
-	for_each_online_cpu(cpu)
-		xen_pmu_finish(cpu);
-
 	if (xen_pv_domain())
 		xen_pv_pre_suspend();
 }

 void xen_arch_post_suspend(int cancelled)
 {
-	int cpu;
-
 	if (xen_pv_domain())
 		xen_pv_post_suspend(cancelled);
 	else
 		xen_hvm_post_suspend(cancelled);
-
-	for_each_online_cpu(cpu)
-		xen_pmu_init(cpu);
 }

 static void xen_vcpu_notify_restore(void *data)
@@ -106,10 +96,20 @@ static void xen_vcpu_notify_suspend(void *data)

 void xen_arch_resume(void)
 {
+	int cpu;
+
 	on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
+
+	for_each_online_cpu(cpu)
+		xen_pmu_init(cpu);
 }

 void xen_arch_suspend(void)
 {
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		xen_pmu_finish(cpu);
+
 	on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
 }

+ 3 - 3
block/blk-cgroup.c

@@ -1127,15 +1127,15 @@ void blkcg_exit_queue(struct request_queue *q)
  * of the main cic data structures.  For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkcg_can_attach(struct cgroup_subsys_state *css,
-			    struct cgroup_taskset *tset)
+static int blkcg_can_attach(struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
+	struct cgroup_subsys_state *dst_css;
 	struct io_context *ioc;
 	int ret = 0;

 	/* task_lock() is needed to avoid races with exit_io_context() */
-	cgroup_taskset_for_each(task, tset) {
+	cgroup_taskset_for_each(task, dst_css, tset) {
 		task_lock(task);
 		ioc = task->io_context;
 		if (ioc && atomic_read(&ioc->nr_tasks) > 1)

+ 14 - 2
block/blk-core.c

@@ -1689,8 +1689,6 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 	struct request *req;
 	unsigned int request_count = 0;

-	blk_queue_split(q, &bio, q->bio_split);
-
 	/*
 	 * low level driver can indicate that it wants pages above a
 	 * certain limit bounced to low memory (ie for highmem, or even
@@ -1698,6 +1696,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 */
 	blk_queue_bounce(q, &bio);

+	blk_queue_split(q, &bio, q->bio_split);
+
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 		bio->bi_error = -EIO;
 		bio_endio(bio);
@@ -3405,6 +3405,9 @@ int blk_pre_runtime_suspend(struct request_queue *q)
 {
 	int ret = 0;

+	if (!q->dev)
+		return ret;
+
 	spin_lock_irq(q->queue_lock);
 	if (q->nr_pending) {
 		ret = -EBUSY;
@@ -3432,6 +3435,9 @@ EXPORT_SYMBOL(blk_pre_runtime_suspend);
  */
 void blk_post_runtime_suspend(struct request_queue *q, int err)
 {
+	if (!q->dev)
+		return;
+
 	spin_lock_irq(q->queue_lock);
 	if (!err) {
 		q->rpm_status = RPM_SUSPENDED;
@@ -3456,6 +3462,9 @@ EXPORT_SYMBOL(blk_post_runtime_suspend);
  */
 void blk_pre_runtime_resume(struct request_queue *q)
 {
+	if (!q->dev)
+		return;
+
 	spin_lock_irq(q->queue_lock);
 	q->rpm_status = RPM_RESUMING;
 	spin_unlock_irq(q->queue_lock);
@@ -3478,6 +3487,9 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
  */
 void blk_post_runtime_resume(struct request_queue *q, int err)
 {
+	if (!q->dev)
+		return;
+
 	spin_lock_irq(q->queue_lock);
 	if (!err) {
 		q->rpm_status = RPM_ACTIVE;

+ 1 - 1
crypto/ablkcipher.c

@@ -277,12 +277,12 @@ static int ablkcipher_walk_first(struct ablkcipher_request *req,
 	if (WARN_ON_ONCE(in_irq()))
 		return -EDEADLK;

+	walk->iv = req->info;
 	walk->nbytes = walk->total;
 	if (unlikely(!walk->total))
 		return 0;

 	walk->iv_buffer = NULL;
-	walk->iv = req->info;
 	if (unlikely(((unsigned long)walk->iv & alignmask))) {
 		int err = ablkcipher_copy_iv(walk, tfm, alignmask);


+ 1 - 1
crypto/blkcipher.c

@@ -326,12 +326,12 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
 	if (WARN_ON_ONCE(in_irq()))
 		return -EDEADLK;
 
+	walk->iv = desc->info;
 	walk->nbytes = walk->total;
 	if (unlikely(!walk->total))
 		return 0;
 
 	walk->buffer = NULL;
-	walk->iv = desc->info;
 	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
 		int err = blkcipher_copy_iv(walk);
 		if (err)
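
Note: both walk-first helpers get the same one-line reorder: walk->iv is assigned before the early return for zero-length requests, so walk->iv is valid even when total == 0 and a caller that copies the IV back out of the walk no longer reads an uninitialized pointer. A sketch of the caller pattern being protected, using the synchronous blkcipher API (example_zero_len() is illustrative):

	static int example_zero_len(struct blkcipher_desc *desc,
				    struct scatterlist *dst,
				    struct scatterlist *src)
	{
		struct blkcipher_walk walk;

		blkcipher_walk_init(&walk, dst, src, 0);  /* zero-byte request */
		/* returns 0 immediately via the !walk->total path above;
		 * before the reorder, walk.iv stayed uninitialized here */
		return blkcipher_walk_virt(desc, &walk);
	}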

+ 1 - 1
drivers/acpi/nfit.c

@@ -1810,7 +1810,7 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
 	if (!dev->driver) {
 		/* dev->driver may be null if we're being removed */
 		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
-		return;
+		goto out_unlock;
 	}
 
 	if (!acpi_desc) {
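
Note: the out_unlock label indicates the notify handler holds a lock taken earlier in the function (not shown in this hunk), so the bare return on the no-driver path skipped the unlock and leaked it. The generic shape of that class of fix, as a self-contained sketch (example_* names are illustrative, and the real lock in acpi_nfit_notify() may not be a mutex):

	static DEFINE_MUTEX(example_mutex);

	static void example_notify(struct device *dev)
	{
		mutex_lock(&example_mutex);
		if (!dev->driver) {
			dev_dbg(dev, "%s: no driver found for dev\n", __func__);
			goto out_unlock;	/* a direct return here would
						 * leave example_mutex held */
		}
		/* ... normal processing ... */
	out_unlock:
		mutex_unlock(&example_mutex);
	}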

+ 12 - 10
drivers/ata/ahci.c

@@ -314,16 +314,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
 	{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
 	{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
-	{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
-	{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
-	{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
 	{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
 	{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
 	{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
@@ -350,10 +340,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
 	{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
 	{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
+	{ PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
 	{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
 	{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
+	{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
 	{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
 	{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
+	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
+	{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
+	{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
 
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
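
Note: the existing Lewisburg PCH entries are only regrouped after the Sunrise Point block (every entry still maps to board_ahci, so matching behavior is unchanged); the functional additions are the two new Sunrise Point-H device IDs, 0xa102 (AHCI) and 0xa106 (RAID). For reference, each table entry expands roughly as follows (a sketch of what the PCI_VDEVICE() macro produces, eliding its designated-initializer form):

	/* { PCI_VDEVICE(INTEL, 0xa102), board_ahci } is roughly: */
	{ PCI_VENDOR_ID_INTEL, 0xa102,		/* vendor, device */
	  PCI_ANY_ID, PCI_ANY_ID,		/* any subvendor/subdevice */
	  0, 0,					/* class, class_mask: unused */
	  board_ahci },				/* driver_data: board type */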

+ 5 - 0
drivers/ata/ahci_mvebu.c

@@ -62,6 +62,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
 	writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
 {
 	return ahci_platform_suspend_host(&pdev->dev);
@@ -81,6 +82,10 @@ static int ahci_mvebu_resume(struct platform_device *pdev)
 
 	return ahci_platform_resume_host(&pdev->dev);
 }
+#else
+#define ahci_mvebu_suspend NULL
+#define ahci_mvebu_resume NULL
+#endif
 
 static const struct ata_port_info ahci_mvebu_port_info = {
 	.flags	   = AHCI_FLAG_COMMON,
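
Note: wrapping the handlers in CONFIG_PM_SLEEP avoids building (and warning about) suspend/resume code when sleep support is compiled out; the legacy platform_driver .suspend/.resume pointers may be NULL, in which case the core simply skips them. A self-contained sketch of the same pattern (example_* names are illustrative):

	#ifdef CONFIG_PM_SLEEP
	static int example_suspend(struct platform_device *pdev, pm_message_t state)
	{
		return 0;	/* real suspend work goes here */
	}

	static int example_resume(struct platform_device *pdev)
	{
		return 0;
	}
	#else
	#define example_suspend NULL	/* NULL callbacks are skipped by the core */
	#define example_resume NULL
	#endif

	static struct platform_driver example_driver = {
		.suspend = example_suspend,
		.resume  = example_resume,
		.driver  = { .name = "example" },
	};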

+ 9 - 0
drivers/ata/libahci.c

@@ -1273,6 +1273,15 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
 	ata_tf_to_fis(tf, pmp, is_cmd, fis);
 	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
 
+	/* set port value for softreset of Port Multiplier */
+	if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
+		tmp = readl(port_mmio + PORT_FBS);
+		tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
+		tmp |= pmp << PORT_FBS_DEV_OFFSET;
+		writel(tmp, port_mmio + PORT_FBS);
+		pp->fbs_last_dev = pmp;
+	}
+
 	/* issue & wait */
 	writel(1, port_mmio + PORT_CMD_ISSUE);
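
Note: with FIS-based switching enabled, commands issued by the HBA are routed to whichever Port Multiplier fan-out port the PxFBS device field currently selects, so a polled softreset must reprogram that field whenever the target pmp differs from the cached fbs_last_dev. The bits manipulated above, as a sketch (values per the AHCI spec's PxFBS register, assumed to match the PORT_FBS_* constants in drivers/ata/ahci.h):

	#define PORT_FBS_DEV_OFFSET	8			/* DEV: device to issue to */
	#define PORT_FBS_DEV_MASK	(0xf << PORT_FBS_DEV_OFFSET)
	#define PORT_FBS_DEC		(1 << 1)		/* device error clear */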

+ 8 - 0
drivers/ata/libata-eh.c

@@ -1505,12 +1505,20 @@ static const char *ata_err_string(unsigned int err_mask)
 unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
 			       u8 page, void *buf, unsigned int sectors)
 {
+	unsigned long ap_flags = dev->link->ap->flags;
 	struct ata_taskfile tf;
 	unsigned int err_mask;
 	bool dma = false;
 
 	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
 
+	/*
+	 * Return error without actually issuing the command on controllers
+	 * which e.g. lockup on a read log page.
+	 */
+	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
+		return AC_ERR_DEV;
+
 retry:
 	ata_tf_init(dev, &tf);
 	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
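
Note: ATA_FLAG_NO_LOG_PAGE lets a controller declare READ LOG PAGE unsafe to issue; the helper then fails fast with AC_ERR_DEV instead of sending a command that can wedge the hardware, and callers fall back to treating the queried feature as unsupported. A sketch of such a consumer (example_probe_settings() is illustrative; the log/page constants are the standard ones from include/linux/ata.h):

	static void example_probe_settings(struct ata_device *dev)
	{
		u8 buf[ATA_SECT_SIZE];

		/* fails fast with AC_ERR_DEV when ATA_FLAG_NO_LOG_PAGE is set */
		if (ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA,
				      ATA_LOG_SATA_SETTINGS, buf, 1))
			return;		/* treat the feature as unsupported */
		/* ... otherwise parse buf ... */
	}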

+ 2 - 1
drivers/ata/sata_fsl.c

@@ -45,7 +45,8 @@ enum {
 	SATA_FSL_MAX_PRD_DIRECT	= 16,	/* Direct PRDT entries */
 
 	SATA_FSL_HOST_FLAGS	= (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
-				ATA_FLAG_PMP | ATA_FLAG_NCQ | ATA_FLAG_AN),
+				   ATA_FLAG_PMP | ATA_FLAG_NCQ |
+				   ATA_FLAG_AN | ATA_FLAG_NO_LOG_PAGE),
 
 	SATA_FSL_MAX_CMDS	= SATA_FSL_QUEUE_DEPTH,
 	SATA_FSL_CMD_HDR_SIZE	= 16,	/* 4 DWORDS */
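
Note: beyond the rewrapped initializer, the functional change is ATA_FLAG_NO_LOG_PAGE joining the host flags, so devices behind this controller take the early AC_ERR_DEV path in ata_read_log_page() above rather than hanging the controller. Any driver can opt in the same way; a sketch (example_port_info is illustrative, and the choice of ata_base_port_ops is an assumption for self-containment):

	static const struct ata_port_info example_port_info = {
		.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
				  ATA_FLAG_NO_LOG_PAGE,
		.pio_mask	= ATA_PIO4,
		.port_ops	= &ata_base_port_ops,
	};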

Some files were not shown because too many files have changed