
Merge tag 'fixes-for-v4.11-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb into usb-linus

Felipe writes:

usb: fixes for v4.11-rc4

f_acm got an endianness fix by Oliver Neukum. The bug had been around for a
long time, but it's finally fixed.
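(As a hedged illustration of the bug class, not Oliver's actual patch:
CDC-ACM notification fields are little-endian on the wire, so a CPU-order
value must be converted before it lands in the buffer. struct
usb_cdc_notification is real kernel UAPI; the helper below is invented for
the example.)

	/* Hedged sketch of an endianness fix in a gadget function driver. */
	#include <linux/usb/cdc.h>
	#include <asm/byteorder.h>

	static void fill_acm_notify(struct usb_cdc_notification *notify, u16 value)
	{
		/* wValue is __le16 on the wire: convert, don't store CPU order */
		notify->wValue = cpu_to_le16(value);
	}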

f_hid learned that it should never access hidg->req without first
grabbing the spinlock.
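(Concretely, the rule looks something like this -- a sketch shaped after
drivers/usb/gadget/function/f_hid.c; hidg->req and write_spinlock are that
driver's names, but the helper itself is invented:)

	/* Hedged sketch: hidg->req can be swapped or completed from interrupt
	 * context, so the pointer may only be read or cleared under the lock. */
	static struct usb_request *hidg_take_req(struct f_hidg *hidg)
	{
		struct usb_request *req;
		unsigned long flags;

		spin_lock_irqsave(&hidg->write_spinlock, flags);
		req = hidg->req;	/* read the pointer only while locked */
		hidg->req = NULL;	/* claim it before the completion handler can */
		spin_unlock_irqrestore(&hidg->write_spinlock, flags);

		return req;		/* the caller now owns the request */
	}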

Roger Quadros fixed two bugs in the f_uvc function driver.

Janusz Dziedzic fixed a very peculiar bug with EP0, one that's rather
difficult to trigger. When we're dealing with bounced EP0 requests, we
should delay unmap until after ->complete() is called.
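(Roughly, the ordering that matters -- a hedged sketch using the UDC core's
real usb_gadget_unmap_request() helper but an invented giveback path; the
actual patch lives in the dwc3 driver:)

	/* Hedged sketch: for a bounced ep0 request, ->complete() may still
	 * copy data out of the bounce buffer, so unmap must come after it. */
	static void ep0_giveback(struct usb_gadget *gadget, struct usb_ep *ep,
				 struct usb_request *req)
	{
		if (req->complete)
			req->complete(ep, req);	/* may read the bounce buffer */

		/* only after the callback is it safe to tear down the mapping */
		usb_gadget_unmap_request(gadget, req, 0 /* is_in: OUT transfer */);
	}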

UDC class got a use-after-free fix.
Greg Kroah-Hartman, 8 years ago
Parent
Current commit: 5617c05d44
100 files changed, 1392 insertions, 562 deletions
  1. Documentation/cgroup-v2.txt (+6 -5)
  2. Documentation/devicetree/bindings/powerpc/4xx/emac.txt (+60 -2)
  3. Documentation/networking/ip-sysctl.txt (+2 -1)
  4. Makefile (+1 -1)
  5. arch/arm/tools/syscall.tbl (+1 -0)
  6. arch/arm64/Kconfig (+4 -0)
  7. arch/arm64/include/asm/cpufeature.h (+1 -1)
  8. arch/arm64/kernel/cpuidle.c (+1 -1)
  9. arch/arm64/kernel/probes/kprobes.c (+0 -6)
  10. arch/arm64/mm/kasan_init.c (+1 -1)
  11. arch/openrisc/include/asm/cmpxchg.h (+6 -2)
  12. arch/openrisc/include/asm/uaccess.h (+1 -1)
  13. arch/openrisc/kernel/or32_ksyms.c (+4 -0)
  14. arch/openrisc/kernel/process.c (+1 -0)
  15. arch/parisc/include/asm/cacheflush.h (+2 -21)
  16. arch/parisc/include/asm/uaccess.h (+2 -1)
  17. arch/parisc/include/uapi/asm/unistd.h (+2 -1)
  18. arch/parisc/kernel/cache.c (+22 -0)
  19. arch/parisc/kernel/module.c (+8 -0)
  20. arch/parisc/kernel/perf.c (+49 -45)
  21. arch/parisc/kernel/process.c (+2 -0)
  22. arch/parisc/kernel/syscall_table.S (+1 -0)
  23. arch/powerpc/boot/zImage.lds.S (+1 -0)
  24. arch/powerpc/crypto/crc32c-vpmsum_glue.c (+1 -1)
  25. arch/powerpc/include/asm/bitops.h (+4 -0)
  26. arch/powerpc/include/asm/mce.h (+107 -1)
  27. arch/powerpc/include/asm/systbl.h (+1 -0)
  28. arch/powerpc/include/asm/unistd.h (+1 -1)
  29. arch/powerpc/include/uapi/asm/unistd.h (+1 -0)
  30. arch/powerpc/kernel/cputable.c (+3 -0)
  31. arch/powerpc/kernel/mce.c (+86 -2)
  32. arch/powerpc/kernel/mce_power.c (+237 -0)
  33. arch/powerpc/perf/core-book3s.c (+2 -0)
  34. arch/powerpc/perf/isa207-common.c (+36 -7)
  35. arch/powerpc/perf/isa207-common.h (+1 -0)
  36. arch/powerpc/platforms/powernv/opal.c (+6 -15)
  37. arch/powerpc/platforms/powernv/pci-ioda.c (+15 -5)
  38. arch/powerpc/platforms/pseries/lpar.c (+3 -1)
  39. arch/x86/events/core.c (+14 -2)
  40. arch/x86/include/asm/pgtable-3level.h (+0 -3)
  41. arch/x86/include/asm/pgtable.h (+1 -1)
  42. arch/x86/kernel/acpi/boot.c (+7 -2)
  43. arch/x86/kernel/apic/apic.c (+7 -19)
  44. arch/x86/kernel/cpu/intel_rdt_rdtgroup.c (+1 -1)
  45. arch/x86/kernel/head64.c (+1 -0)
  46. arch/x86/kernel/nmi.c (+2 -4)
  47. arch/x86/kernel/tsc.c (+2 -0)
  48. arch/x86/kernel/unwind_frame.c (+30 -6)
  49. arch/x86/mm/kasan_init_64.c (+1 -0)
  50. arch/x86/mm/mpx.c (+1 -1)
  51. arch/x86/platform/intel-mid/device_libs/Makefile (+1 -0)
  52. arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c (+82 -0)
  53. arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c (+1 -1)
  54. arch/x86/platform/intel-mid/mfld.c (+4 -11)
  55. block/bio.c (+9 -3)
  56. block/blk-core.c (+18 -12)
  57. block/blk-mq-tag.c (+3 -0)
  58. block/blk-mq.c (+5 -4)
  59. crypto/af_alg.c (+5 -4)
  60. crypto/algif_hash.c (+5 -4)
  61. drivers/acpi/acpi_processor.c (+42 -15)
  62. drivers/acpi/bus.c (+0 -1)
  63. drivers/acpi/processor_core.c (+22 -111)
  64. drivers/ata/ahci_qoriq.c (+4 -2)
  65. drivers/ata/libata-sff.c (+0 -1)
  66. drivers/ata/libata-transport.c (+3 -6)
  67. drivers/base/core.c (+0 -5)
  68. drivers/char/hw_random/omap-rng.c (+14 -2)
  69. drivers/clocksource/tcb_clksrc.c (+1 -15)
  70. drivers/cpufreq/cpufreq.c (+5 -3)
  71. drivers/cpufreq/intel_pstate.c (+31 -33)
  72. drivers/crypto/s5p-sss.c (+85 -47)
  73. drivers/dax/dax.c (+30 -3)
  74. drivers/gpu/drm/amd/acp/Makefile (+0 -2)
  75. drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c (+2 -0)
  76. drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (+2 -2)
  77. drivers/gpu/drm/amd/amdgpu/si_dpm.c (+6 -0)
  78. drivers/gpu/drm/amd/amdgpu/vi.c (+1 -1)
  79. drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c (+1 -1)
  80. drivers/gpu/drm/arm/malidp_crtc.c (+1 -2)
  81. drivers/gpu/drm/arm/malidp_hw.c (+1 -1)
  82. drivers/gpu/drm/arm/malidp_planes.c (+16 -2)
  83. drivers/gpu/drm/arm/malidp_regs.h (+1 -0)
  84. drivers/gpu/drm/i915/i915_drv.h (+1 -0)
  85. drivers/gpu/drm/i915/i915_gem.c (+94 -3)
  86. drivers/gpu/drm/i915/i915_gem_evict.c (+4 -4)
  87. drivers/gpu/drm/i915/i915_gem_object.h (+3 -0)
  88. drivers/gpu/drm/i915/i915_vma.c (+37 -20)
  89. drivers/gpu/drm/i915/intel_display.c (+29 -29)
  90. drivers/gpu/drm/i915/intel_fbdev.c (+4 -6)
  91. drivers/gpu/drm/i915/intel_pm.c (+13 -5)
  92. drivers/gpu/drm/i915/intel_sprite.c (+0 -3)
  93. drivers/gpu/drm/i915/intel_uncore.c (+6 -7)
  94. drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c (+0 -3)
  95. drivers/gpu/drm/radeon/si_dpm.c (+6 -0)
  96. drivers/gpu/drm/tilcdc/tilcdc_crtc.c (+24 -13)
  97. drivers/isdn/gigaset/bas-gigaset.c (+3 -0)
  98. drivers/macintosh/macio_asic.c (+1 -0)
  99. drivers/md/dm.c (+16 -13)
  100. drivers/md/md-cluster.c (+1 -1)

+ 6 - 5
Documentation/cgroup-v2.txt

@@ -1142,16 +1142,17 @@ used by the kernel.

  pids.max

- A read-write single value file which exists on non-root cgroups.  The
- default is "max".
+	A read-write single value file which exists on non-root
+	cgroups.  The default is "max".

- Hard limit of number of processes.
+	Hard limit of number of processes.

  pids.current

- A read-only single value file which exists on all cgroups.
+	A read-only single value file which exists on all cgroups.

- The number of processes currently in the cgroup and its descendants.
+	The number of processes currently in the cgroup and its
+	descendants.

Organisational operations are not blocked by cgroup policies, so it is
possible to have pids.current > pids.max.  This can be done by either

+ 60 - 2
Documentation/devicetree/bindings/powerpc/4xx/emac.txt

@@ -71,6 +71,9 @@
			  For Axon it can be absent, though my current driver
			  doesn't handle phy-address yet so for now, keep
			  0x00ffffff in it.
+    - phy-handle	: Used to describe configurations where a external PHY
+			  is used. Please refer to:
+			  Documentation/devicetree/bindings/net/ethernet.txt
    - rx-fifo-size-gige : 1 cell, Rx fifo size in bytes for 1000 Mb/sec
			  operations (if absent the value is the same as
			  rx-fifo-size).  For Axon, either absent or 2048.
@@ -81,8 +84,22 @@
			  offload, phandle of the TAH device node.
    - tah-channel       : 1 cell, optional. If appropriate, channel used on the
			  TAH engine.
+    - fixed-link	: Fixed-link subnode describing a link to a non-MDIO
+			  managed entity. See
+			  Documentation/devicetree/bindings/net/fixed-link.txt
+			  for details.
+    - mdio subnode	: When the EMAC has a phy connected to its local
+			  mdio, which us supported by the kernel's network
+			  PHY library in drivers/net/phy, there must be device
+			  tree subnode with the following required properties:
+				- #address-cells: Must be <1>.
+				- #size-cells: Must be <0>.

-    Example:
+			  For PHY definitions: Please refer to
+			  Documentation/devicetree/bindings/net/phy.txt and
+			  Documentation/devicetree/bindings/net/ethernet.txt
+
+    Examples:

	EMAC0: ethernet@40000800 {
		device_type = "network";
@@ -104,6 +121,48 @@
		zmii-channel = <0>;
	};

+	EMAC1: ethernet@ef600c00 {
+		device_type = "network";
+		compatible = "ibm,emac-apm821xx", "ibm,emac4sync";
+		interrupt-parent = <&EMAC1>;
+		interrupts = <0 1>;
+		#interrupt-cells = <1>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		interrupt-map = <0 &UIC2 0x10 IRQ_TYPE_LEVEL_HIGH /* Status */
+				 1 &UIC2 0x14 IRQ_TYPE_LEVEL_HIGH /* Wake */>;
+		reg = <0xef600c00 0x000000c4>;
+		local-mac-address = [000000000000]; /* Filled in by U-Boot */
+		mal-device = <&MAL0>;
+		mal-tx-channel = <0>;
+		mal-rx-channel = <0>;
+		cell-index = <0>;
+		max-frame-size = <9000>;
+		rx-fifo-size = <16384>;
+		tx-fifo-size = <2048>;
+		fifo-entry-size = <10>;
+		phy-mode = "rgmii";
+		phy-handle = <&phy0>;
+		phy-map = <0x00000000>;
+		rgmii-device = <&RGMII0>;
+		rgmii-channel = <0>;
+		tah-device = <&TAH0>;
+		tah-channel = <0>;
+		has-inverted-stacr-oc;
+		has-new-stacr-staopc;
+
+	        mdio {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			phy0: ethernet-phy@0 {
+				compatible = "ethernet-phy-ieee802.3-c22";
+				reg = <0>;
+			};
+		};
+	};
+
+
      ii) McMAL node

    Required properties:
@@ -145,4 +204,3 @@
    - revision           : as provided by the RGMII new version register if
			   available.
			   For Axon: 0x0000012a
-

+ 2 - 1
Documentation/networking/ip-sysctl.txt

@@ -1006,7 +1006,8 @@ accept_redirects - BOOLEAN
		FALSE (router)

forwarding - BOOLEAN
-	Enable IP forwarding on this interface.
+	Enable IP forwarding on this interface.  This controls whether packets
+	received _on_ this interface can be forwarded.

mc_forwarding - BOOLEAN
	Do multicast routing. The kernel needs to be compiled with CONFIG_MROUTE

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 11
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
NAME = Fearless Coyote

# *DOCUMENTATION*

+ 1 - 0
arch/arm/tools/syscall.tbl

@@ -411,3 +411,4 @@
394	common	pkey_mprotect		sys_pkey_mprotect
395	common	pkey_alloc		sys_pkey_alloc
396	common	pkey_free		sys_pkey_free
+397	common	statx			sys_statx

+ 4 - 0
arch/arm64/Kconfig

@@ -1073,6 +1073,10 @@ config SYSVIPC_COMPAT
	def_bool y
	depends on COMPAT && SYSVIPC

+config KEYS_COMPAT
+	def_bool y
+	depends on COMPAT && KEYS
+
endmenu

menu "Power management options"

+ 1 - 1
arch/arm64/include/asm/cpufeature.h

@@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void)
static inline bool system_uses_ttbr0_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
-		!cpus_have_cap(ARM64_HAS_PAN);
+		!cpus_have_const_cap(ARM64_HAS_PAN);
}

#endif /* __ASSEMBLY__ */

+ 1 - 1
arch/arm64/kernel/cpuidle.c

@@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu)
}

/**
- * cpu_suspend() - function to enter a low-power idle state
+ * arm_cpuidle_suspend() - function to enter a low-power idle state
 * @arg: argument to pass to CPU suspend operations
 *
 * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU

+ 0 - 6
arch/arm64/kernel/probes/kprobes.c

@@ -372,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
	return 0;
}

-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
-				       unsigned long val, void *data)
-{
-	return NOTIFY_DONE;
-}
-
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;

+ 1 - 1
arch/arm64/mm/kasan_init.c

@@ -162,7 +162,7 @@ void __init kasan_init(void)
	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
-			 pfn_to_nid(virt_to_pfn(_text)));
+			 pfn_to_nid(virt_to_pfn(lm_alias(_text))));

	/*
	 * vmemmap_populate() has populated the shadow region that covers the

+ 6 - 2
arch/openrisc/include/asm/cmpxchg.h

@@ -77,7 +77,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
	return val;
}

-#define xchg(ptr, with) \
-	((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr))))
+#define xchg(ptr, with) 						\
+	({								\
+		(__typeof__(*(ptr))) __xchg((unsigned long)(with),	\
+					    (ptr),			\
+					    sizeof(*(ptr)));		\
+	})

#endif /* __ASM_OPENRISC_CMPXCHG_H */

+ 1 - 1
arch/openrisc/include/asm/uaccess.h

@@ -211,7 +211,7 @@ do {									\
	case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break;		\
	case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break;		\
	case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break;		\
-	case 8: __get_user_asm2(x, ptr, retval);			\
+	case 8: __get_user_asm2(x, ptr, retval); break;			\
	default: (x) = __get_user_bad();				\
	}								\
} while (0)

+ 4 - 0
arch/openrisc/kernel/or32_ksyms.c

@@ -30,6 +30,7 @@
#include <asm/hardirq.h>
#include <asm/delay.h>
#include <asm/pgalloc.h>
+#include <asm/pgtable.h>

#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)

@@ -42,6 +43,9 @@ DECLARE_EXPORT(__muldi3);
DECLARE_EXPORT(__ashrdi3);
DECLARE_EXPORT(__ashldi3);
DECLARE_EXPORT(__lshrdi3);
+DECLARE_EXPORT(__ucmpdi2);

+EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(__copy_tofrom_user);
+EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(memset);

+ 1 - 0
arch/openrisc/kernel/process.c

@@ -90,6 +90,7 @@ void arch_cpu_idle(void)
}

void (*pm_power_off) (void) = machine_power_off;
+EXPORT_SYMBOL(pm_power_off);

/*
 * When a process does an "exec", machine state like FPU and debug

+ 2 - 21
arch/parisc/include/asm/cacheflush.h

@@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size));
-/* vmap range flushes and invalidates.  Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-
-	flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-	void *cursor = vaddr;

-	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
-		struct page *page = vmalloc_to_page(cursor);
-
-		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-			flush_kernel_dcache_page(page);
-	}
-	flush_kernel_dcache_range_asm(start, start + size);
-}
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

+ 2 - 1
arch/parisc/include/asm/uaccess.h

@@ -32,7 +32,8 @@
 * that put_user is the same as __put_user, etc.
 */

-#define access_ok(type, uaddr, size) (1)
+#define access_ok(type, uaddr, size)	\
+	( (uaddr) == (uaddr) )

#define put_user __put_user
#define get_user __get_user

+ 2 - 1
arch/parisc/include/uapi/asm/unistd.h

@@ -362,8 +362,9 @@
#define __NR_copy_file_range	(__NR_Linux + 346)
#define __NR_preadv2		(__NR_Linux + 347)
#define __NR_pwritev2		(__NR_Linux + 348)
+#define __NR_statx		(__NR_Linux + 349)

-#define __NR_Linux_syscalls	(__NR_pwritev2 + 1)
+#define __NR_Linux_syscalls	(__NR_statx + 1)


#define __IGNORE_select		/* newselect */

+ 22 - 0
arch/parisc/kernel/cache.c

@@ -616,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
	}
}
+
+void flush_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);

+ 8 - 0
arch/parisc/kernel/module.c

@@ -620,6 +620,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
			 */
			*loc = fsel(val, addend);
			break;
+		case R_PARISC_SECREL32:
+			/* 32-bit section relative address. */
+			*loc = fsel(val, addend);
+			break;
		case R_PARISC_DPREL21L:
			/* left 21 bit of relative address */
			val = lrsel(val - dp, addend);
@@ -807,6 +811,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
			 */
			*loc = fsel(val, addend);
			break;
+		case R_PARISC_SECREL32:
+			/* 32-bit section relative address. */
+			*loc = fsel(val, addend);
+			break;
		case R_PARISC_FPTR64:
			/* 64-bit function address */
			if(in_local(me, (void *)(val + addend))) {

+ 49 - 45
arch/parisc/kernel/perf.c

@@ -39,7 +39,7 @@
 *  the PDC INTRIGUE calls.  This is done to eliminate bugs introduced
 *  in various PDC revisions.  The code is much more maintainable
 *  and reliable this way vs having to debug on every version of PDC
- *  on every box. 
+ *  on every box.
 */

#include <linux/capability.h>
@@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr);
static int perf_release(struct inode *inode, struct file *file);
static int perf_open(struct inode *inode, struct file *file);
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, 
-	loff_t *ppos);
+static ssize_t perf_write(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void perf_start_counters(void);
static int perf_stop_counters(uint32_t *raddr);
@@ -222,7 +222,7 @@ extern void perf_intrigue_disable_perf_counters (void);
/*
 * configure:
 *
- * Configure the cpu with a given data image.  First turn off the counters, 
+ * Configure the cpu with a given data image.  First turn off the counters,
 * then download the image, then turn the counters back on.
 */
static int perf_config(uint32_t *image_ptr)
@@ -234,7 +234,7 @@ static int perf_config(uint32_t *image_ptr)
	error = perf_stop_counters(raddr);
	if (error != 0) {
		printk("perf_config: perf_stop_counters = %ld\n", error);
-		return -EINVAL; 
+		return -EINVAL;
	}

printk("Preparing to write image\n");
@@ -242,7 +242,7 @@ printk("Preparing to write image\n");
	error = perf_write_image((uint64_t *)image_ptr);
	if (error != 0) {
		printk("perf_config: DOWNLOAD = %ld\n", error);
-		return -EINVAL; 
+		return -EINVAL;
	}

printk("Preparing to start counters\n");
@@ -254,7 +254,7 @@ printk("Preparing to start counters\n");
}

/*
- * Open the device and initialize all of its memory.  The device is only 
+ * Open the device and initialize all of its memory.  The device is only
 * opened once, but can be "queried" by multiple processes that know its
 * file descriptor.
 */
@@ -298,19 +298,19 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
 * called on the processor that the download should happen
 * on.
 */
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, 
-	loff_t *ppos)
+static ssize_t perf_write(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos)
{
	size_t image_size;
	uint32_t image_type;
	uint32_t interface_type;
	uint32_t test;

-	if (perf_processor_interface == ONYX_INTF) 
+	if (perf_processor_interface == ONYX_INTF)
		image_size = PCXU_IMAGE_SIZE;
-	else if (perf_processor_interface == CUDA_INTF) 
+	else if (perf_processor_interface == CUDA_INTF)
		image_size = PCXW_IMAGE_SIZE;
-	else 
+	else
		return -EFAULT;

	if (!capable(CAP_SYS_ADMIN))
@@ -330,22 +330,22 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun

	/* First check the machine type is correct for
	   the requested image */
-        if (((perf_processor_interface == CUDA_INTF) &&
-		       (interface_type != CUDA_INTF)) ||
-	    ((perf_processor_interface == ONYX_INTF) &&
-	               (interface_type != ONYX_INTF))) 
+	if (((perf_processor_interface == CUDA_INTF) &&
+			(interface_type != CUDA_INTF)) ||
+		((perf_processor_interface == ONYX_INTF) &&
+			(interface_type != ONYX_INTF)))
		return -EINVAL;

	/* Next check to make sure the requested image
	   is valid */
-	if (((interface_type == CUDA_INTF) && 
+	if (((interface_type == CUDA_INTF) &&
		       (test >= MAX_CUDA_IMAGES)) ||
-	    ((interface_type == ONYX_INTF) && 
-		       (test >= MAX_ONYX_IMAGES))) 
+	    ((interface_type == ONYX_INTF) &&
+		       (test >= MAX_ONYX_IMAGES)))
		return -EINVAL;

	/* Copy the image into the processor */
-	if (interface_type == CUDA_INTF) 
+	if (interface_type == CUDA_INTF)
		return perf_config(cuda_images[test]);
	else
		return perf_config(onyx_images[test]);
@@ -359,7 +359,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
static void perf_patch_images(void)
{
#if 0 /* FIXME!! */
-/* 
+/*
 * NOTE:  this routine is VERY specific to the current TLB image.
 * If the image is changed, this routine might also need to be changed.
 */
@@ -367,9 +367,9 @@ static void perf_patch_images(void)
	extern void $i_dtlb_miss_2_0();
	extern void PA2_0_iva();

-	/* 
+	/*
	 * We can only use the lower 32-bits, the upper 32-bits should be 0
-	 * anyway given this is in the kernel 
+	 * anyway given this is in the kernel
	 */
	uint32_t itlb_addr  = (uint32_t)&($i_itlb_miss_2_0);
	uint32_t dtlb_addr  = (uint32_t)&($i_dtlb_miss_2_0);
@@ -377,21 +377,21 @@ static void perf_patch_images(void)

	if (perf_processor_interface == ONYX_INTF) {
		/* clear last 2 bytes */
-		onyx_images[TLBMISS][15] &= 0xffffff00;  
+		onyx_images[TLBMISS][15] &= 0xffffff00;
		/* set 2 bytes */
		onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
		onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
		onyx_images[TLBMISS][17] = itlb_addr;

		/* clear last 2 bytes */
-		onyx_images[TLBHANDMISS][15] &= 0xffffff00;  
+		onyx_images[TLBHANDMISS][15] &= 0xffffff00;
		/* set 2 bytes */
		onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
		onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
		onyx_images[TLBHANDMISS][17] = itlb_addr;

		/* clear last 2 bytes */
-		onyx_images[BIG_CPI][15] &= 0xffffff00;  
+		onyx_images[BIG_CPI][15] &= 0xffffff00;
		/* set 2 bytes */
		onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
		onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
@@ -404,24 +404,24 @@ static void perf_patch_images(void)

	} else if (perf_processor_interface == CUDA_INTF) {
		/* Cuda interface */
-		cuda_images[TLBMISS][16] =  
+		cuda_images[TLBMISS][16] =
			(cuda_images[TLBMISS][16]&0xffff0000) |
			((dtlb_addr >> 8)&0x0000ffff);
-		cuda_images[TLBMISS][17] = 
+		cuda_images[TLBMISS][17] =
			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
		cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;

-		cuda_images[TLBHANDMISS][16] = 
+		cuda_images[TLBHANDMISS][16] =
			(cuda_images[TLBHANDMISS][16]&0xffff0000) |
			((dtlb_addr >> 8)&0x0000ffff);
-		cuda_images[TLBHANDMISS][17] = 
+		cuda_images[TLBHANDMISS][17] =
			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
		cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;

-		cuda_images[BIG_CPI][16] = 
+		cuda_images[BIG_CPI][16] =
			(cuda_images[BIG_CPI][16]&0xffff0000) |
			((dtlb_addr >> 8)&0x0000ffff);
-		cuda_images[BIG_CPI][17] = 
+		cuda_images[BIG_CPI][17] =
			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
		cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
	} else {
@@ -433,7 +433,7 @@ static void perf_patch_images(void)

/*
 * ioctl routine
- * All routines effect the processor that they are executed on.  Thus you 
+ * All routines effect the processor that they are executed on.  Thus you
 * must be running on the processor that you wish to change.
 */

@@ -459,7 +459,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
			}

			/* copy out the Counters */
-			if (copy_to_user((void __user *)arg, raddr, 
+			if (copy_to_user((void __user *)arg, raddr,
					sizeof (raddr)) != 0) {
				error =  -EFAULT;
				break;
@@ -487,7 +487,7 @@ static const struct file_operations perf_fops = {
	.open = perf_open,
	.release = perf_release
};
-	
+
static struct miscdevice perf_dev = {
	MISC_DYNAMIC_MINOR,
	PA_PERF_DEV,
@@ -595,7 +595,7 @@ static int perf_stop_counters(uint32_t *raddr)
		/* OR sticky2 (bit 1496) to counter2 bit 32 */
		tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
		raddr[2] = (uint32_t)tmp64;
-		
+
		/* Counter3 is bits 1497 to 1528 */
		tmp64 =  (userbuf[23] >> 7) & 0x00000000ffffffff;
		/* OR sticky3 (bit 1529) to counter3 bit 32 */
@@ -617,7 +617,7 @@ static int perf_stop_counters(uint32_t *raddr)
		userbuf[22] = 0;
		userbuf[23] = 0;

-		/* 
+		/*
		 * Write back the zeroed bytes + the image given
		 * the read was destructive.
		 */
@@ -625,13 +625,13 @@ static int perf_stop_counters(uint32_t *raddr)
	} else {

		/*
-		 * Read RDR-15 which contains the counters and sticky bits 
+		 * Read RDR-15 which contains the counters and sticky bits
		 */
		if (!perf_rdr_read_ubuf(15, userbuf)) {
			return -13;
		}

-		/* 
+		/*
		 * Clear out the counters
		 */
		perf_rdr_clear(15);
@@ -644,7 +644,7 @@ static int perf_stop_counters(uint32_t *raddr)
		raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
		raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
	}
- 
+
	return 0;
}

@@ -682,7 +682,7 @@ static int perf_rdr_read_ubuf(uint32_t	rdr_num, uint64_t *buffer)
	i = tentry->num_words;
	while (i--) {
		buffer[i] = 0;
-	}	
+	}

	/* Check for bits an even number of 64 */
	if ((xbits = width & 0x03f) != 0) {
@@ -808,18 +808,22 @@ static int perf_write_image(uint64_t *memaddr)
	}

	runway = ioremap_nocache(cpu_device->hpa.start, 4096);
+	if (!runway) {
+		pr_err("perf_write_image: ioremap failed!\n");
+		return -ENOMEM;
+	}

	/* Merge intrigue bits into Runway STATUS 0 */
	tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
-	__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul), 
+	__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
		     runway + RUNWAY_STATUS);
-	
+
	/* Write RUNWAY DEBUG registers */
	for (i = 0; i < 8; i++) {
		__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
	}

-	return 0; 
+	return 0;
}

/*
@@ -843,7 +847,7 @@printk("perf_rdr_write\n");
			perf_rdr_shift_out_U(rdr_num, buffer[i]);
		} else {
			perf_rdr_shift_out_W(rdr_num, buffer[i]);
-		}	
+		}
	}
printk("perf_rdr_write done\n");
}

+ 2 - 0
arch/parisc/kernel/process.c

@@ -142,6 +142,8 @@ void machine_power_off(void)

	printk(KERN_EMERG "System shut down completed.\n"
	       "Please power this system off now.");
+
+	for (;;);
}

void (*pm_power_off)(void) = machine_power_off;

+ 1 - 0
arch/parisc/kernel/syscall_table.S

@@ -444,6 +444,7 @@
	ENTRY_SAME(copy_file_range)
	ENTRY_COMP(preadv2)
	ENTRY_COMP(pwritev2)
+	ENTRY_SAME(statx)


.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))

+ 1 - 0
arch/powerpc/boot/zImage.lds.S

@@ -68,6 +68,7 @@ SECTIONS
  }

#ifdef CONFIG_PPC64_BOOT_WRAPPER
+  . = ALIGN(256);
  .got :
  {
    __toc_start = .;

+ 1 - 1
arch/powerpc/crypto/crc32c-vpmsum_glue.c

@@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
{
	u32 *key = crypto_tfm_ctx(tfm);

-	*key = 0;
+	*key = ~0;

	return 0;
}

+ 4 - 0
arch/powerpc/include/asm/bitops.h

@@ -51,6 +51,10 @@
#define PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit))
#define PPC_BITMASK(bs, be)	((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

+/* Put a PPC bit into a "normal" bit position */
+#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit)			\
+	((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))
+
#include <asm/barrier.h>

/* Macro for generating the ***_bits() functions */
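(For readers not fluent in IBM bit numbering, a standalone sketch of what the
new macro computes. The three macro definitions are copied from asm/bitops.h
and this patch; the test program around them is illustrative only and assumes
a 64-bit build.)

	#include <stdio.h>
	#include <stdint.h>

	#define BITS_PER_LONG 64
	#define PPC_BITLSHIFT(be)	(BITS_PER_LONG - 1 - (be))
	#define PPC_BIT(bit)		(1ULL << PPC_BITLSHIFT(bit))
	#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
		((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))

	int main(void)
	{
		/* IBM numbering counts bit 0 as the MSB, so PPC bit 45 is plain bit 18 */
		uint64_t srr1 = PPC_BIT(45) | PPC_BIT(36);

		/* mirrors the P9_SRR1_MC_IFETCH() decoder added to asm/mce.h below:
		 * packs SRR1 bits 45, 44, 43, 36 into a 0..15 reason code */
		unsigned int code = PPC_BITEXTRACT(srr1, 45, 0) |
				    PPC_BITEXTRACT(srr1, 44, 1) |
				    PPC_BITEXTRACT(srr1, 43, 2) |
				    PPC_BITEXTRACT(srr1, 36, 3);

		printf("reason code = %u\n", code);	/* prints 9: bits 0 and 3 set */
		return 0;
	}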

+ 107 - 1
arch/powerpc/include/asm/mce.h

@@ -66,6 +66,55 @@

#define P8_DSISR_MC_SLB_ERRORS		(P7_DSISR_MC_SLB_ERRORS | \
					 P8_DSISR_MC_ERAT_MULTIHIT_SEC)
+
+/*
+ * Machine Check bits on power9
+ */
+#define P9_SRR1_MC_LOADSTORE(srr1)	(((srr1) >> PPC_BITLSHIFT(42)) & 1)
+
+#define P9_SRR1_MC_IFETCH(srr1)	(	\
+	PPC_BITEXTRACT(srr1, 45, 0) |	\
+	PPC_BITEXTRACT(srr1, 44, 1) |	\
+	PPC_BITEXTRACT(srr1, 43, 2) |	\
+	PPC_BITEXTRACT(srr1, 36, 3) )
+
+/* 0 is reserved */
+#define P9_SRR1_MC_IFETCH_UE				1
+#define P9_SRR1_MC_IFETCH_SLB_PARITY			2
+#define P9_SRR1_MC_IFETCH_SLB_MULTIHIT			3
+#define P9_SRR1_MC_IFETCH_ERAT_MULTIHIT			4
+#define P9_SRR1_MC_IFETCH_TLB_MULTIHIT			5
+#define P9_SRR1_MC_IFETCH_UE_TLB_RELOAD			6
+/* 7 is reserved */
+#define P9_SRR1_MC_IFETCH_LINK_TIMEOUT			8
+#define P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT	9
+/* 10 ? */
+#define P9_SRR1_MC_IFETCH_RA			11
+#define P9_SRR1_MC_IFETCH_RA_TABLEWALK		12
+#define P9_SRR1_MC_IFETCH_RA_ASYNC_STORE		13
+#define P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT	14
+#define P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN	15
+
+/* DSISR bits for machine check (On Power9) */
+#define P9_DSISR_MC_UE					(PPC_BIT(48))
+#define P9_DSISR_MC_UE_TABLEWALK			(PPC_BIT(49))
+#define P9_DSISR_MC_LINK_LOAD_TIMEOUT			(PPC_BIT(50))
+#define P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT		(PPC_BIT(51))
+#define P9_DSISR_MC_ERAT_MULTIHIT			(PPC_BIT(52))
+#define P9_DSISR_MC_TLB_MULTIHIT_MFTLB			(PPC_BIT(53))
+#define P9_DSISR_MC_USER_TLBIE				(PPC_BIT(54))
+#define P9_DSISR_MC_SLB_PARITY_MFSLB			(PPC_BIT(55))
+#define P9_DSISR_MC_SLB_MULTIHIT_MFSLB			(PPC_BIT(56))
+#define P9_DSISR_MC_RA_LOAD				(PPC_BIT(57))
+#define P9_DSISR_MC_RA_TABLEWALK			(PPC_BIT(58))
+#define P9_DSISR_MC_RA_TABLEWALK_FOREIGN		(PPC_BIT(59))
+#define P9_DSISR_MC_RA_FOREIGN				(PPC_BIT(60))
+
+/* SLB error bits */
+#define P9_DSISR_MC_SLB_ERRORS		(P9_DSISR_MC_ERAT_MULTIHIT | \
+					 P9_DSISR_MC_SLB_PARITY_MFSLB | \
+					 P9_DSISR_MC_SLB_MULTIHIT_MFSLB)
+
enum MCE_Version {
	MCE_V1 = 1,
};
@@ -93,6 +142,9 @@ enum MCE_ErrorType {
	MCE_ERROR_TYPE_SLB = 2,
	MCE_ERROR_TYPE_ERAT = 3,
	MCE_ERROR_TYPE_TLB = 4,
+	MCE_ERROR_TYPE_USER = 5,
+	MCE_ERROR_TYPE_RA = 6,
+	MCE_ERROR_TYPE_LINK = 7,
};

enum MCE_UeErrorType {
@@ -121,6 +173,32 @@ enum MCE_TlbErrorType {
	MCE_TLB_ERROR_MULTIHIT = 2,
};

+enum MCE_UserErrorType {
+	MCE_USER_ERROR_INDETERMINATE = 0,
+	MCE_USER_ERROR_TLBIE = 1,
+};
+
+enum MCE_RaErrorType {
+	MCE_RA_ERROR_INDETERMINATE = 0,
+	MCE_RA_ERROR_IFETCH = 1,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 3,
+	MCE_RA_ERROR_LOAD = 4,
+	MCE_RA_ERROR_STORE = 5,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 6,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 7,
+	MCE_RA_ERROR_LOAD_STORE_FOREIGN = 8,
+};
+
+enum MCE_LinkErrorType {
+	MCE_LINK_ERROR_INDETERMINATE = 0,
+	MCE_LINK_ERROR_IFETCH_TIMEOUT = 1,
+	MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT = 2,
+	MCE_LINK_ERROR_LOAD_TIMEOUT = 3,
+	MCE_LINK_ERROR_STORE_TIMEOUT = 4,
+	MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT = 5,
+};
+
struct machine_check_event {
	enum MCE_Version	version:8;	/* 0x00 */
	uint8_t			in_use;		/* 0x01 */
@@ -166,6 +244,30 @@ struct machine_check_event {
			uint64_t	effective_address;
			uint8_t		reserved_2[16];
		} tlb_error;
+
+		struct {
+			enum MCE_UserErrorType user_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} user_error;
+
+		struct {
+			enum MCE_RaErrorType ra_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} ra_error;
+
+		struct {
+			enum MCE_LinkErrorType link_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} link_error;
 	} u;
	} u;
};

 		enum MCE_SlbErrorType slb_error_type:8;
 		enum MCE_SlbErrorType slb_error_type:8;
 		enum MCE_EratErrorType erat_error_type:8;
 		enum MCE_EratErrorType erat_error_type:8;
 		enum MCE_TlbErrorType tlb_error_type:8;
 		enum MCE_TlbErrorType tlb_error_type:8;
+		enum MCE_UserErrorType user_error_type:8;
+		enum MCE_RaErrorType ra_error_type:8;
+		enum MCE_LinkErrorType link_error_type:8;
 	} u;
	} u;
+	enum MCE_Severity	severity:8;
+	enum MCE_Initiator	initiator:8;
 };
};

 #define MAX_MC_EVT	100

+ 1 - 0
arch/powerpc/include/asm/systbl.h

@@ -387,3 +387,4 @@ SYSCALL(copy_file_range)
COMPAT_SYS_SPU(preadv2)
COMPAT_SYS_SPU(pwritev2)
SYSCALL(kexec_file_load)
+SYSCALL(statx)

+ 1 - 1
arch/powerpc/include/asm/unistd.h

@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>


-#define NR_syscalls		383
+#define NR_syscalls		384

#define __NR__exit __NR_exit


+ 1 - 0
arch/powerpc/include/uapi/asm/unistd.h

@@ -393,5 +393,6 @@
#define __NR_preadv2		380
#define __NR_pwritev2		381
#define __NR_kexec_file_load	382
+#define __NR_statx		383

#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */

+ 3 - 0
arch/powerpc/kernel/cputable.c

@@ -77,6 +77,7 @@ extern void __flush_tlb_power8(unsigned int action);
extern void __flush_tlb_power9(unsigned int action);
extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
+extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@@ -540,6 +541,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
		.cpu_setup		= __setup_cpu_power9,
		.cpu_restore		= __restore_cpu_power9,
		.flush_tlb		= __flush_tlb_power9,
+		.machine_check_early	= __machine_check_early_realmode_p9,
		.platform		= "power9",
	},
	{	/* Power9 */
@@ -559,6 +561,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
		.cpu_setup		= __setup_cpu_power9,
		.cpu_restore		= __restore_cpu_power9,
		.flush_tlb		= __flush_tlb_power9,
+		.machine_check_early	= __machine_check_early_realmode_p9,
		.platform		= "power9",
	},
	{	/* Cell Broadband Engine */

+ 86 - 2
arch/powerpc/kernel/mce.c

@@ -58,6 +58,15 @@ static void mce_set_error_info(struct machine_check_event *mce,
	case MCE_ERROR_TYPE_TLB:
		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
		break;
+	case MCE_ERROR_TYPE_USER:
+		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
+		break;
+	case MCE_ERROR_TYPE_RA:
+		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
+		break;
+	case MCE_ERROR_TYPE_LINK:
+		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
+		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		break;
@@ -90,13 +99,14 @@ void save_mce_event(struct pt_regs *regs, long handled,
	mce->gpr3 = regs->gpr[3];
	mce->in_use = 1;

-	mce->initiator = MCE_INITIATOR_CPU;
	/* Mark it recovered if we have handled it and MSR(RI=1). */
	if (handled && (regs->msr & MSR_RI))
		mce->disposition = MCE_DISPOSITION_RECOVERED;
	else
		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
-	mce->severity = MCE_SEV_ERROR_SYNC;
+
+	mce->initiator = mce_err->initiator;
+	mce->severity = mce_err->severity;

	/*
	 * Populate the mce error_type and type-specific error_type.
@@ -115,6 +125,15 @@ void save_mce_event(struct pt_regs *regs, long handled,
	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
		mce->u.erat_error.effective_address_provided = true;
		mce->u.erat_error.effective_address = addr;
+	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
+		mce->u.user_error.effective_address_provided = true;
+		mce->u.user_error.effective_address = addr;
+	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
+		mce->u.ra_error.effective_address_provided = true;
+		mce->u.ra_error.effective_address = addr;
+	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
+		mce->u.link_error.effective_address_provided = true;
+		mce->u.link_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
		mce->u.ue_error.effective_address_provided = true;
		mce->u.ue_error.effective_address = addr;
@@ -239,6 +258,29 @@ void machine_check_print_event_info(struct machine_check_event *evt)
		"Parity",
		"Multihit",
	};
+	static const char *mc_user_types[] = {
+		"Indeterminate",
+		"tlbie(l) invalid",
+	};
+	static const char *mc_ra_types[] = {
+		"Indeterminate",
+		"Instruction fetch (bad)",
+		"Page table walk ifetch (bad)",
+		"Page table walk ifetch (foreign)",
+		"Load (bad)",
+		"Store (bad)",
+		"Page table walk Load/Store (bad)",
+		"Page table walk Load/Store (foreign)",
+		"Load/Store (foreign)",
+	};
+	static const char *mc_link_types[] = {
+		"Indeterminate",
+		"Instruction fetch (timeout)",
+		"Page table walk ifetch (timeout)",
+		"Load (timeout)",
+		"Store (timeout)",
+		"Page table walk Load/Store (timeout)",
+	};

	/* Print things out */
	if (evt->version != MCE_V1) {
@@ -315,6 +357,36 @@ void machine_check_print_event_info(struct machine_check_event *evt)
			printk("%s    Effective address: %016llx\n",
			       level, evt->u.tlb_error.effective_address);
		break;
+	case MCE_ERROR_TYPE_USER:
+		subtype = evt->u.user_error.user_error_type <
+			ARRAY_SIZE(mc_user_types) ?
+			mc_user_types[evt->u.user_error.user_error_type]
+			: "Unknown";
+		printk("%s  Error type: User [%s]\n", level, subtype);
+		if (evt->u.user_error.effective_address_provided)
+			printk("%s    Effective address: %016llx\n",
+			       level, evt->u.user_error.effective_address);
+		break;
+	case MCE_ERROR_TYPE_RA:
+		subtype = evt->u.ra_error.ra_error_type <
+			ARRAY_SIZE(mc_ra_types) ?
+			mc_ra_types[evt->u.ra_error.ra_error_type]
+			: "Unknown";
+		printk("%s  Error type: Real address [%s]\n", level, subtype);
+		if (evt->u.ra_error.effective_address_provided)
+			printk("%s    Effective address: %016llx\n",
+			       level, evt->u.ra_error.effective_address);
+		break;
+	case MCE_ERROR_TYPE_LINK:
+		subtype = evt->u.link_error.link_error_type <
+			ARRAY_SIZE(mc_link_types) ?
+			mc_link_types[evt->u.link_error.link_error_type]
+			: "Unknown";
+		printk("%s  Error type: Link [%s]\n", level, subtype);
+		if (evt->u.link_error.effective_address_provided)
+			printk("%s    Effective address: %016llx\n",
+			       level, evt->u.link_error.effective_address);
+		break;
	default:
	case MCE_ERROR_TYPE_UNKNOWN:
		printk("%s  Error type: Unknown\n", level);
@@ -341,6 +413,18 @@ uint64_t get_mce_fault_addr(struct machine_check_event *evt)
		if (evt->u.tlb_error.effective_address_provided)
			return evt->u.tlb_error.effective_address;
		break;
+	case MCE_ERROR_TYPE_USER:
+		if (evt->u.user_error.effective_address_provided)
+			return evt->u.user_error.effective_address;
+		break;
+	case MCE_ERROR_TYPE_RA:
+		if (evt->u.ra_error.effective_address_provided)
+			return evt->u.ra_error.effective_address;
+		break;
+	case MCE_ERROR_TYPE_LINK:
+		if (evt->u.link_error.effective_address_provided)
+			return evt->u.link_error.effective_address;
+		break;
	default:
	case MCE_ERROR_TYPE_UNKNOWN:
		break;

+ 237 - 0
arch/powerpc/kernel/mce_power.c

@@ -116,6 +116,51 @@ static void flush_and_reload_slb(void)
}
#endif

+static void flush_erat(void)
+{
+	asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
+}
+
+#define MCE_FLUSH_SLB 1
+#define MCE_FLUSH_TLB 2
+#define MCE_FLUSH_ERAT 3
+
+static int mce_flush(int what)
+{
+#ifdef CONFIG_PPC_STD_MMU_64
+	if (what == MCE_FLUSH_SLB) {
+		flush_and_reload_slb();
+		return 1;
+	}
+#endif
+	if (what == MCE_FLUSH_ERAT) {
+		flush_erat();
+		return 1;
+	}
+	if (what == MCE_FLUSH_TLB) {
+		if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
+			cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static int mce_handle_flush_derrors(uint64_t dsisr, uint64_t slb, uint64_t tlb, uint64_t erat)
+{
+	if ((dsisr & slb) && mce_flush(MCE_FLUSH_SLB))
+		dsisr &= ~slb;
+	if ((dsisr & erat) && mce_flush(MCE_FLUSH_ERAT))
+		dsisr &= ~erat;
+	if ((dsisr & tlb) && mce_flush(MCE_FLUSH_TLB))
+		dsisr &= ~tlb;
+	/* Any other errors we don't understand? */
+	if (dsisr)
+		return 0;
+	return 1;
+}
+
static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
{
	long handled = 1;
@@ -281,6 +326,9 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
	long handled = 1;
	struct mce_error_info mce_error_info = { 0 };

+	mce_error_info.severity = MCE_SEV_ERROR_SYNC;
+	mce_error_info.initiator = MCE_INITIATOR_CPU;
+
	srr1 = regs->msr;
	nip = regs->nip;

@@ -352,6 +400,9 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
	long handled = 1;
	struct mce_error_info mce_error_info = { 0 };

+	mce_error_info.severity = MCE_SEV_ERROR_SYNC;
+	mce_error_info.initiator = MCE_INITIATOR_CPU;
+
	srr1 = regs->msr;
	nip = regs->nip;

@@ -372,3 +423,189 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
	save_mce_event(regs, handled, &mce_error_info, nip, addr);
	return handled;
}
+
+static int mce_handle_derror_p9(struct pt_regs *regs)
+{
+	uint64_t dsisr = regs->dsisr;
+
+	return mce_handle_flush_derrors(dsisr,
+			P9_DSISR_MC_SLB_PARITY_MFSLB |
+			P9_DSISR_MC_SLB_MULTIHIT_MFSLB,
+
+			P9_DSISR_MC_TLB_MULTIHIT_MFTLB,
+
+			P9_DSISR_MC_ERAT_MULTIHIT);
+}
+
+static int mce_handle_ierror_p9(struct pt_regs *regs)
+{
+	uint64_t srr1 = regs->msr;
+
+	switch (P9_SRR1_MC_IFETCH(srr1)) {
+	case P9_SRR1_MC_IFETCH_SLB_PARITY:
+	case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
+		return mce_flush(MCE_FLUSH_SLB);
+	case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
+		return mce_flush(MCE_FLUSH_TLB);
+	case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
+		return mce_flush(MCE_FLUSH_ERAT);
+	default:
+		return 0;
+	}
+}
+
+static void mce_get_derror_p9(struct pt_regs *regs,
+		struct mce_error_info *mce_err, uint64_t *addr)
+{
+	uint64_t dsisr = regs->dsisr;
+
+	mce_err->severity = MCE_SEV_ERROR_SYNC;
+	mce_err->initiator = MCE_INITIATOR_CPU;
+
+	if (dsisr & P9_DSISR_MC_USER_TLBIE)
+		*addr = regs->nip;
+	else
+		*addr = regs->dar;
+
+	if (dsisr & P9_DSISR_MC_UE) {
+		mce_err->error_type = MCE_ERROR_TYPE_UE;
+		mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
+	} else if (dsisr & P9_DSISR_MC_UE_TABLEWALK) {
+		mce_err->error_type = MCE_ERROR_TYPE_UE;
+		mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
+	} else if (dsisr & P9_DSISR_MC_LINK_LOAD_TIMEOUT) {
+		mce_err->error_type = MCE_ERROR_TYPE_LINK;
+		mce_err->u.link_error_type = MCE_LINK_ERROR_LOAD_TIMEOUT;
+	} else if (dsisr & P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT) {
+		mce_err->error_type = MCE_ERROR_TYPE_LINK;
+		mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT;
+	} else if (dsisr & P9_DSISR_MC_ERAT_MULTIHIT) {
+		mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+		mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+	} else if (dsisr & P9_DSISR_MC_TLB_MULTIHIT_MFTLB) {
+		mce_err->error_type = MCE_ERROR_TYPE_TLB;
+		mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+	} else if (dsisr & P9_DSISR_MC_USER_TLBIE) {
+		mce_err->error_type = MCE_ERROR_TYPE_USER;
+		mce_err->u.user_error_type = MCE_USER_ERROR_TLBIE;
+	} else if (dsisr & P9_DSISR_MC_SLB_PARITY_MFSLB) {
+		mce_err->error_type = MCE_ERROR_TYPE_SLB;
+		mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+	} else if (dsisr & P9_DSISR_MC_SLB_MULTIHIT_MFSLB) {
+		mce_err->error_type = MCE_ERROR_TYPE_SLB;
+		mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+	} else if (dsisr & P9_DSISR_MC_RA_LOAD) {
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD;
+	} else if (dsisr & P9_DSISR_MC_RA_TABLEWALK) {
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
+	} else if (dsisr & P9_DSISR_MC_RA_TABLEWALK_FOREIGN) {
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN;
+	} else if (dsisr & P9_DSISR_MC_RA_FOREIGN) {
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD_STORE_FOREIGN;
+	}
+}
+
+static void mce_get_ierror_p9(struct pt_regs *regs,
+		struct mce_error_info *mce_err, uint64_t *addr)
+{
+	uint64_t srr1 = regs->msr;
+
+	switch (P9_SRR1_MC_IFETCH(srr1)) {
+	case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
+	case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
+		mce_err->severity = MCE_SEV_FATAL;
+		break;
+	default:
+		mce_err->severity = MCE_SEV_ERROR_SYNC;
+		break;
+	}
+
+	mce_err->initiator = MCE_INITIATOR_CPU;
+
+	*addr = regs->nip;
+
+	switch (P9_SRR1_MC_IFETCH(srr1)) {
+	case P9_SRR1_MC_IFETCH_UE:
+		mce_err->error_type = MCE_ERROR_TYPE_UE;
+		mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
+		break;
+	case P9_SRR1_MC_IFETCH_SLB_PARITY:
+		mce_err->error_type = MCE_ERROR_TYPE_SLB;
+		mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+		break;
+	case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
+		mce_err->error_type = MCE_ERROR_TYPE_SLB;
+		mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+		break;
+	case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
+		mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+		mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+		break;
+	case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
+		mce_err->error_type = MCE_ERROR_TYPE_TLB;
+		mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+		break;
+	case P9_SRR1_MC_IFETCH_UE_TLB_RELOAD:
+		mce_err->error_type = MCE_ERROR_TYPE_UE;
+		mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
+		break;
+	case P9_SRR1_MC_IFETCH_LINK_TIMEOUT:
+		mce_err->error_type = MCE_ERROR_TYPE_LINK;
+		mce_err->u.link_error_type = MCE_LINK_ERROR_IFETCH_TIMEOUT;
+		break;
+	case P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT:
+		mce_err->error_type = MCE_ERROR_TYPE_LINK;
+		mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT;
+		break;
+	case P9_SRR1_MC_IFETCH_RA:
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_IFETCH;
+		break;
+	case P9_SRR1_MC_IFETCH_RA_TABLEWALK:
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH;
+		break;
+	case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_STORE;
+		break;
+	case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
+		mce_err->error_type = MCE_ERROR_TYPE_LINK;
+		mce_err->u.link_error_type = MCE_LINK_ERROR_STORE_TIMEOUT;
+		break;
+	case P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN:
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN;
+		break;
+	default:
+		break;
+	}
+}
+
+long __machine_check_early_realmode_p9(struct pt_regs *regs)
+{
+	uint64_t nip, addr;
+	long handled;
+	struct mce_error_info mce_error_info = { 0 };
+
+	nip = regs->nip;
+
+	if (P9_SRR1_MC_LOADSTORE(regs->msr)) {
+		handled = mce_handle_derror_p9(regs);
+		mce_get_derror_p9(regs, &mce_error_info, &addr);
+	} else {
+		handled = mce_handle_ierror_p9(regs);
+		mce_get_ierror_p9(regs, &mce_error_info, &addr);
+	}
+
+	/* Handle UE error. */
+	if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
+		handled = mce_handle_ue_error(regs);
+
+	save_mce_event(regs, handled, &mce_error_info, nip, addr);
+	return handled;
+}
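A note for readers of mce_handle_derror_p9() above: mce_handle_flush_derrors() is added elsewhere in this patch, outside the quoted hunks. Its rough shape, reconstructed from how it is called here (an assumption, not a quoted hunk), is:

	static int mce_handle_flush_derrors(uint64_t dsisr, uint64_t slb,
					    uint64_t tlb, uint64_t erat)
	{
		/* Try to flush each structure whose DSISR error bit is set ... */
		if ((dsisr & slb) && mce_flush(MCE_FLUSH_SLB))
			dsisr &= ~slb;
		if ((dsisr & tlb) && mce_flush(MCE_FLUSH_TLB))
			dsisr &= ~tlb;
		if ((dsisr & erat) && mce_flush(MCE_FLUSH_ERAT))
			dsisr &= ~erat;

		/* ... and claim success only if no flagged error remains. */
		return dsisr ? 0 : 1;
	}

So a d-side error counts as handled only when every flagged SLB/TLB/ERAT error could be cleared by a flush.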

+ 2 - 0
arch/powerpc/perf/core-book3s.c

@@ -188,6 +188,8 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
 			sdsync = POWER7P_MMCRA_SDAR_VALID;
 		else if (ppmu->flags & PPMU_ALT_SIPR)
 			sdsync = POWER6_MMCRA_SDSYNC;
+		else if (ppmu->flags & PPMU_NO_SIAR)
+			sdsync = MMCRA_SAMPLE_ENABLE;
 		else
 			sdsync = MMCRA_SDSYNC;


+ 36 - 7
arch/powerpc/perf/isa207-common.c

@@ -65,12 +65,41 @@ static bool is_event_valid(u64 event)
 	return !(event & ~valid_mask);
 }

-static u64 mmcra_sdar_mode(u64 event)
+static inline bool is_event_marked(u64 event)
 {
-	if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
-		return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+	if (event & EVENT_IS_MARKED)
+		return true;
+
+	return false;
+}

-	return MMCRA_SDAR_MODE_TLB;
+static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
+{
+	/*
+	 * MMCRA[SDAR_MODE] specifies how the SDAR should be updated in
+	 * continuous sampling mode.
+	 *
+	 * In the case of Power8:
+	 * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous
+	 * sampling mode and is left unchanged when setting MMCRA[63]
+	 * (marked events).
+	 *
+	 * In the case of Power9:
+	 * Marked events (or a group that already contains a marked event):
+	 *	MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates').
+	 * Non-marked events (on DD1):
+	 *	MMCRA[SDAR_MODE] will be set to 0b01.
+	 * All other cases:
+	 *	MMCRA[SDAR_MODE] will be taken from the event code.
+	 */
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
+			*mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
+		else if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
+			*mmcra |=  p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+		else if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			*mmcra |= MMCRA_SDAR_MODE_TLB;
+	} else
+		*mmcra |= MMCRA_SDAR_MODE_TLB;
 }

 static u64 thresh_cmp_val(u64 value)
@@ -180,7 +209,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
 		value |= CNST_L1_QUAL_VAL(cache);
 	}

-	if (event & EVENT_IS_MARKED) {
+	if (is_event_marked(event)) {
 		mask  |= CNST_SAMPLE_MASK;
 		value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
 	}
@@ -276,7 +305,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
 		}

 		/* In continuous sampling mode, update SDAR on TLB miss */
-		mmcra |= mmcra_sdar_mode(event[i]);
+		mmcra_sdar_mode(event[i], &mmcra);

 		if (event[i] & EVENT_IS_L1) {
 			cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
@@ -285,7 +314,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
 			mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
 		}

-		if (event[i] & EVENT_IS_MARKED) {
+		if (is_event_marked(event[i])) {
 			mmcra |= MMCRA_SAMPLE_ENABLE;

 			val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
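One subtlety in mmcra_sdar_mode() above: MMCRA_SDAR_MODE_NO_UPDATES, added in the isa207-common.h hunk below, is a clear-mask rather than a field value, which is why it is applied with &= while the other modes are OR'd in. A standalone sanity check of the bit arithmetic (a sketch compiled outside the kernel, macros copied from the hunk below):

	#include <stdio.h>

	#define MMCRA_SDAR_MODE_SHIFT		42
	#define MMCRA_SDAR_MODE_TLB		(1ull << MMCRA_SDAR_MODE_SHIFT)
	#define MMCRA_SDAR_MODE_NO_UPDATES	~(0x3ull << MMCRA_SDAR_MODE_SHIFT)

	int main(void)
	{
		unsigned long long mmcra = MMCRA_SDAR_MODE_TLB;	/* field = 0b01 */

		mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;		/* field -> 0b00 */
		printf("SDAR_MODE = %llu\n",
		       (mmcra >> MMCRA_SDAR_MODE_SHIFT) & 0x3);	/* prints 0 */
		return 0;
	}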

+ 1 - 0
arch/powerpc/perf/isa207-common.h

@@ -246,6 +246,7 @@
 #define MMCRA_THR_CMP_SHIFT		32
 #define MMCRA_SDAR_MODE_SHIFT		42
 #define MMCRA_SDAR_MODE_TLB		(1ull << MMCRA_SDAR_MODE_SHIFT)
+#define MMCRA_SDAR_MODE_NO_UPDATES	~(0x3ull << MMCRA_SDAR_MODE_SHIFT)
 #define MMCRA_IFM_SHIFT			30

 /* MMCR1 Threshold Compare bit constant for power9 */

+ 6 - 15
arch/powerpc/platforms/powernv/opal.c

@@ -395,7 +395,6 @@ static int opal_recover_mce(struct pt_regs *regs,
 					struct machine_check_event *evt)
 {
 	int recovered = 0;
-	uint64_t ea = get_mce_fault_addr(evt);

 	if (!(regs->msr & MSR_RI)) {
 		/* If MSR_RI isn't set, we cannot recover */
@@ -404,26 +403,18 @@ static int opal_recover_mce(struct pt_regs *regs,
 	} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
 		/* Platform corrected itself */
 		recovered = 1;
-	} else if (ea && !is_kernel_addr(ea)) {
+	} else if (evt->severity == MCE_SEV_FATAL) {
+		/* Fatal machine check */
+		pr_err("Machine check interrupt is fatal\n");
+		recovered = 0;
+	} else if ((evt->severity == MCE_SEV_ERROR_SYNC) &&
+			(user_mode(regs) && !is_global_init(current))) {
 		/*
-		 * Faulting address is not in kernel text. We should be fine.
-		 * We need to find which process uses this address.
 		 * For now, kill the task if we have received exception when
 		 * in userspace.
 		 *
 		 * TODO: Queue up this address for hwpoisoning later.
 		 */
-		if (user_mode(regs) && !is_global_init(current)) {
-			_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
-			recovered = 1;
-		} else
-			recovered = 0;
-	} else if (user_mode(regs) && !is_global_init(current) &&
-		evt->severity == MCE_SEV_ERROR_SYNC) {
-		/*
-		 * If we have received a synchronous error when in userspace
-		 * kill the task.
-		 */
 		_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
 		recovered = 1;
 	}

+ 15 - 5
arch/powerpc/platforms/powernv/pci-ioda.c

@@ -1775,17 +1775,20 @@ static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
 }

 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
-				   struct pci_bus *bus)
+				   struct pci_bus *bus,
+				   bool add_to_group)
 {
 	struct pci_dev *dev;

 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
 		set_dma_offset(&dev->dev, pe->tce_bypass_base);
-		iommu_add_device(&dev->dev);
+		if (add_to_group)
+			iommu_add_device(&dev->dev);

 		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
-			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
+			pnv_ioda_setup_bus_dma(pe, dev->subordinate,
+					add_to_group);
 	}
 }

@@ -2191,7 +2194,7 @@ found:
 		set_iommu_table_base(&pe->pdev->dev, tbl);
 		iommu_add_device(&pe->pdev->dev);
 	} else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
-		pnv_ioda_setup_bus_dma(pe, pe->pbus);
+		pnv_ioda_setup_bus_dma(pe, pe->pbus, true);

 	return;
  fail:
@@ -2426,6 +2429,8 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)

 	pnv_pci_ioda2_set_bypass(pe, false);
 	pnv_pci_ioda2_unset_window(&pe->table_group, 0);
+	if (pe->pbus)
+		pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
 	pnv_ioda2_table_free(tbl);
 }

@@ -2435,6 +2440,8 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
 						table_group);

 	pnv_pci_ioda2_setup_default_config(pe);
+	if (pe->pbus)
+		pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
 }

 static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
@@ -2624,6 +2631,9 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 	level_shift = entries_shift + 3;
 	level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);

+	if ((level_shift - 3) * levels + page_shift >= 60)
+		return -EINVAL;
+
 	/* Allocate TCE table */
 	addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
 			levels, tce_table_size, &offset, &total_allocated);
@@ -2728,7 +2738,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
 	if (pe->flags & PNV_IODA_PE_DEV)
 		iommu_add_device(&pe->pdev->dev);
 	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
-		pnv_ioda_setup_bus_dma(pe, pe->pbus);
+		pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
 }

 #ifdef CONFIG_PCI_MSI
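The new '>= 60' check in pnv_pci_ioda2_table_alloc_pages() above bounds the multi-level TCE table geometry: each level holds 1 << (level_shift - 3) eight-byte TCEs, so the table can address (level_shift - 3) * levels + page_shift bits of DMA window. A worked example with assumed values (not taken from the patch):

	#include <stdio.h>

	int main(void)
	{
		/* Assumed geometry: 5 levels, 64KB table pages (level_shift = 16,
		 * i.e. 8192 TCEs per level), 4KB IOMMU pages (page_shift = 12). */
		unsigned levels = 5, level_shift = 16, page_shift = 12;
		unsigned window_bits = (level_shift - 3) * levels + page_shift;

		/* 13 * 5 + 12 = 77 >= 60, so this geometry is now rejected. */
		printf("window bits = %u -> %s\n", window_bits,
		       window_bits >= 60 ? "-EINVAL" : "ok");
		return 0;
	}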

+ 3 - 1
arch/powerpc/platforms/pseries/lpar.c

@@ -751,7 +751,9 @@ void __init hpte_init_pseries(void)
 	mmu_hash_ops.flush_hash_range	 = pSeries_lpar_flush_hash_range;
 	mmu_hash_ops.hpte_clear_all      = pseries_hpte_clear_all;
 	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
-	mmu_hash_ops.resize_hpt		 = pseries_lpar_resize_hpt;
+
+	if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
+		mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
 }

 void radix_init_pseries(void)

+ 14 - 2
arch/x86/events/core.c

@@ -2101,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event)

 static void refresh_pce(void *ignored)
 {
-	if (current->mm)
-		load_mm_cr4(current->mm);
+	if (current->active_mm)
+		load_mm_cr4(current->active_mm);
 }

 static void x86_pmu_event_mapped(struct perf_event *event)
@@ -2110,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)
 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
 		return;

+	/*
+	 * This function relies on not being called concurrently in two
+	 * tasks in the same mm.  Otherwise one task could observe
+	 * perf_rdpmc_allowed > 1 and return all the way back to
+	 * userspace with CR4.PCE clear while another task is still
+	 * doing on_each_cpu_mask() to propagate CR4.PCE.
+	 *
+	 * For now, this can't happen because all callers hold mmap_sem
+	 * for write.  If this changes, we'll need a different solution.
+	 */
+	lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+
 	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
 		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
 }
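The refresh_pce() change above is about CPUs that are lazily running a kernel thread when the IPI arrives: there current->mm is NULL, so the old test skipped the CR4.PCE update even though the CPU still has a user mm loaded. A comment-style sketch of the distinction (my reading of the fix, with the lazy-mm behaviour stated as an assumption, not text from the patch):

	/*
	 * Assumed lazy-mm behaviour: a kernel thread has no mm of its own
	 * and borrows the previous task's address space.
	 *
	 *   user task:      current->mm == current->active_mm
	 *   kernel thread:  current->mm == NULL,
	 *                   current->active_mm == the borrowed user mm
	 *
	 * x86_pmu_event_mapped() IPIs every CPU in mm_cpumask(mm), and a
	 * lazily-running CPU is in that mask; testing ->mm left its
	 * CR4.PCE stale, while testing ->active_mm covers both cases.
	 */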

+ 0 - 3
arch/x86/include/asm/pgtable-3level.h

@@ -121,12 +121,9 @@ static inline void native_pmd_clear(pmd_t *pmd)
 	*(tmp + 1) = 0;
 }

-#if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \
-		defined(CONFIG_PARAVIRT))
 static inline void native_pud_clear(pud_t *pudp)
 {
 }
-#endif

 static inline void pud_clear(pud_t *pudp)
 {

+ 1 - 1
arch/x86/include/asm/pgtable.h

@@ -62,7 +62,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
 # define set_pud(pudp, pud)		native_set_pud(pudp, pud)
 #endif

-#ifndef __PAGETABLE_PMD_FOLDED
+#ifndef __PAGETABLE_PUD_FOLDED
 #define pud_clear(pud)			native_pud_clear(pud)
 #endif


+ 7 - 2
arch/x86/kernel/acpi/boot.c

@@ -179,10 +179,15 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
 		return -EINVAL;
 	}

+	if (!enabled) {
+		++disabled_cpus;
+		return -EINVAL;
+	}
+
 	if (boot_cpu_physical_apicid != -1U)
 		ver = boot_cpu_apic_version;

-	cpu = __generic_processor_info(id, ver, enabled);
+	cpu = generic_processor_info(id, ver);
 	if (cpu >= 0)
 		early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;

@@ -710,7 +715,7 @@ static void __init acpi_set_irq_model_ioapic(void)
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>

-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int nid;

+ 7 - 19
arch/x86/kernel/apic/apic.c

@@ -2063,7 +2063,7 @@ static int allocate_logical_cpuid(int apicid)
 	return nr_logical_cpuids++;
 }

-int __generic_processor_info(int apicid, int version, bool enabled)
+int generic_processor_info(int apicid, int version)
 {
 	int cpu, max = nr_cpu_ids;
 	bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2121,11 +2121,9 @@ int __generic_processor_info(int apicid, int version, bool enabled)
 	if (num_processors >= nr_cpu_ids) {
 		int thiscpu = max + disabled_cpus;

-		if (enabled) {
-			pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
-				   "reached. Processor %d/0x%x ignored.\n",
-				   max, thiscpu, apicid);
-		}
+		pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
+			   "reached. Processor %d/0x%x ignored.\n",
+			   max, thiscpu, apicid);

 		disabled_cpus++;
 		return -EINVAL;
@@ -2177,23 +2175,13 @@ int __generic_processor_info(int apicid, int version, bool enabled)
 		apic->x86_32_early_logical_apicid(cpu);
 #endif
 	set_cpu_possible(cpu, true);
-
-	if (enabled) {
-		num_processors++;
-		physid_set(apicid, phys_cpu_present_map);
-		set_cpu_present(cpu, true);
-	} else {
-		disabled_cpus++;
-	}
+	physid_set(apicid, phys_cpu_present_map);
+	set_cpu_present(cpu, true);
+	num_processors++;

 	return cpu;
 }

-int generic_processor_info(int apicid, int version)
-{
-	return __generic_processor_info(apicid, version, true);
-}
-
 int hard_smp_processor_id(void)
 {
 	return read_apic_id();

+ 1 - 1
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c

@@ -727,7 +727,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
 	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
 	    (rdtgrp->flags & RDT_DELETED)) {
 		kernfs_unbreak_active_protection(kn);
-		kernfs_put(kn);
+		kernfs_put(rdtgrp->kn);
 		kfree(rdtgrp);
 	} else {
 		kernfs_unbreak_active_protection(kn);

+ 1 - 0
arch/x86/kernel/head64.c

@@ -4,6 +4,7 @@
  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  */

+#define DISABLE_BRANCH_PROFILING
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/types.h>

+ 2 - 4
arch/x86/kernel/nmi.c

@@ -166,11 +166,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
 	spin_lock_irqsave(&desc->lock, flags);

 	/*
-	 * most handlers of type NMI_UNKNOWN never return because
-	 * they just assume the NMI is theirs.  Just a sanity check
-	 * to manage expectations
+	 * Indicate if there are multiple registrations on the
+	 * internal NMI handler call chains (SERR and IO_CHECK).
 	 */
-	WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
 	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
 	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));


+ 2 - 0
arch/x86/kernel/tsc.c

@@ -1333,6 +1333,8 @@ static int __init init_tsc_clocksource(void)
 	 * the refined calibration and directly register it as a clocksource.
 	 */
 	if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
+		if (boot_cpu_has(X86_FEATURE_ART))
+			art_related_clocksource = &clocksource_tsc;
 		clocksource_register_khz(&clocksource_tsc, tsc_khz);
 		return 0;
 	}

+ 30 - 6
arch/x86/kernel/unwind_frame.c

@@ -82,19 +82,43 @@ static size_t regs_size(struct pt_regs *regs)
 	return sizeof(*regs);
 }

+#ifdef CONFIG_X86_32
+#define GCC_REALIGN_WORDS 3
+#else
+#define GCC_REALIGN_WORDS 1
+#endif
+
 static bool is_last_task_frame(struct unwind_state *state)
 {
-	unsigned long bp = (unsigned long)state->bp;
-	unsigned long regs = (unsigned long)task_pt_regs(state->task);
+	unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2;
+	unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS;

 	/*
 	 * We have to check for the last task frame at two different locations
 	 * because gcc can occasionally decide to realign the stack pointer and
-	 * change the offset of the stack frame by a word in the prologue of a
-	 * function called by head/entry code.
+	 * change the offset of the stack frame in the prologue of a function
+	 * called by head/entry code.  Examples:
+	 *
+	 * <start_secondary>:
+	 *      push   %edi
+	 *      lea    0x8(%esp),%edi
+	 *      and    $0xfffffff8,%esp
+	 *      pushl  -0x4(%edi)
+	 *      push   %ebp
+	 *      mov    %esp,%ebp
+	 *
+	 * <x86_64_start_kernel>:
+	 *      lea    0x8(%rsp),%r10
+	 *      and    $0xfffffffffffffff0,%rsp
+	 *      pushq  -0x8(%r10)
+	 *      push   %rbp
+	 *      mov    %rsp,%rbp
+	 *
+	 * Note that after aligning the stack, it pushes a duplicate copy of
+	 * the return address before pushing the frame pointer.
 	 */
-	return bp == regs - FRAME_HEADER_SIZE ||
-	       bp == regs - FRAME_HEADER_SIZE - sizeof(long);
+	return (state->bp == last_bp ||
+		(state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
 }

 /*

+ 1 - 0
arch/x86/mm/kasan_init_64.c

@@ -1,3 +1,4 @@
+#define DISABLE_BRANCH_PROFILING
 #define pr_fmt(fmt) "kasan: " fmt
 #include <linux/bootmem.h>
 #include <linux/kasan.h>

+ 1 - 1
arch/x86/mm/mpx.c

@@ -590,7 +590,7 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
  * we might run off the end of the bounds table if we are on
  * a 64-bit kernel and try to get 8 bytes.
  */
-int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
+static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
 		long __user *bd_entry_ptr)
 {
 	u32 bd_entry_32;

+ 1 - 0
arch/x86/platform/intel-mid/device_libs/Makefile

@@ -26,5 +26,6 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
 obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
 # MISC Devices
 obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
+obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_mrfld_power_btn.o
 obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o
 obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o

+ 82 - 0
arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c

@@ -0,0 +1,82 @@
+/*
+ * Intel Merrifield power button support
+ *
+ * (C) Copyright 2017 Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/sfi.h>
+
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+
+static struct resource mrfld_power_btn_resources[] = {
+	{
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device mrfld_power_btn_dev = {
+	.name		= "msic_power_btn",
+	.id		= PLATFORM_DEVID_NONE,
+	.num_resources	= ARRAY_SIZE(mrfld_power_btn_resources),
+	.resource	= mrfld_power_btn_resources,
+};
+
+static int mrfld_power_btn_scu_status_change(struct notifier_block *nb,
+					     unsigned long code, void *data)
+{
+	if (code == SCU_DOWN) {
+		platform_device_unregister(&mrfld_power_btn_dev);
+		return 0;
+	}
+
+	return platform_device_register(&mrfld_power_btn_dev);
+}
+
+static struct notifier_block mrfld_power_btn_scu_notifier = {
+	.notifier_call	= mrfld_power_btn_scu_status_change,
+};
+
+static int __init register_mrfld_power_btn(void)
+{
+	if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+		return -ENODEV;
+
+	/*
+	 * We need to be sure that the SCU IPC is ready before the
+	 * PMIC power button device can be registered:
+	 */
+	intel_scu_notifier_add(&mrfld_power_btn_scu_notifier);
+
+	return 0;
+}
+arch_initcall(register_mrfld_power_btn);
+
+static void __init *mrfld_power_btn_platform_data(void *info)
+{
+	struct resource *res = mrfld_power_btn_resources;
+	struct sfi_device_table_entry *pentry = info;
+
+	res->start = res->end = pentry->irq;
+	return NULL;
+}
+
+static const struct devs_id mrfld_power_btn_dev_id __initconst = {
+	.name			= "bcove_power_btn",
+	.type			= SFI_DEV_TYPE_IPC,
+	.delay			= 1,
+	.msic			= 1,
+	.get_platform_data	= &mrfld_power_btn_platform_data,
+};
+
+sfi_device(mrfld_power_btn_dev_id);

+ 1 - 1
arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c

@@ -19,7 +19,7 @@
 #include <asm/intel_scu_ipc.h>
 #include <asm/io_apic.h>

-#define TANGIER_EXT_TIMER0_MSI 15
+#define TANGIER_EXT_TIMER0_MSI 12

 static struct platform_device wdt_dev = {
 	.name = "intel_mid_wdt",

+ 4 - 11
arch/x86/platform/intel-mid/mfld.c

@@ -17,16 +17,6 @@

 #include "intel_mid_weak_decls.h"

-static void penwell_arch_setup(void);
-/* penwell arch ops */
-static struct intel_mid_ops penwell_ops = {
-	.arch_setup = penwell_arch_setup,
-};
-
-static void mfld_power_off(void)
-{
-}
-
 static unsigned long __init mfld_calibrate_tsc(void)
 {
 	unsigned long fast_calibrate;
@@ -63,9 +53,12 @@ static unsigned long __init mfld_calibrate_tsc(void)
 static void __init penwell_arch_setup(void)
 {
 	x86_platform.calibrate_tsc = mfld_calibrate_tsc;
-	pm_power_off = mfld_power_off;
 }

+static struct intel_mid_ops penwell_ops = {
+	.arch_setup = penwell_arch_setup,
+};
+
 void *get_penwell_ops(void)
 {
 	return &penwell_ops;

+ 9 - 3
block/bio.c

@@ -376,10 +376,14 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
 	bio_list_init(&punt);
 	bio_list_init(&nopunt);

-	while ((bio = bio_list_pop(current->bio_list)))
+	while ((bio = bio_list_pop(&current->bio_list[0])))
 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+	current->bio_list[0] = nopunt;

-	*current->bio_list = nopunt;
+	bio_list_init(&nopunt);
+	while ((bio = bio_list_pop(&current->bio_list[1])))
+		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+	current->bio_list[1] = nopunt;

 	spin_lock(&bs->rescue_lock);
 	bio_list_merge(&bs->rescue_list, &punt);
@@ -466,7 +470,9 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 		 * we retry with the original gfp_flags.
 		 */

-		if (current->bio_list && !bio_list_empty(current->bio_list))
+		if (current->bio_list &&
+		    (!bio_list_empty(&current->bio_list[0]) ||
+		     !bio_list_empty(&current->bio_list[1])))
 			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

 		p = mempool_alloc(bs->bio_pool, gfp_mask);
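Both bio.c hunks above follow from the same change: current->bio_list now points at a two-element array (set up in the blk-core.c hunk below), so the rescuer must drain both lists. The duplicated pop-and-sort loops could equally be read as two calls to a small helper like this (hypothetical, not part of the patch):

	/* Move every bio allocated from 'bs' out of 'list' into 'punt',
	 * keeping the others in their original order. */
	static void punt_one_list(struct bio_list *list, struct bio_set *bs,
				  struct bio_list *punt)
	{
		struct bio_list nopunt;
		struct bio *bio;

		bio_list_init(&nopunt);
		while ((bio = bio_list_pop(list)))
			bio_list_add(bio->bi_pool == bs ? punt : &nopunt, bio);
		*list = nopunt;
	}

Either way, the point is that the rescuer now scans both current->bio_list[0] and current->bio_list[1], never just the first.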

+ 18 - 12
block/blk-core.c

@@ -1973,7 +1973,14 @@ end_io:
  */
 blk_qc_t generic_make_request(struct bio *bio)
 {
-	struct bio_list bio_list_on_stack;
+	/*
+	 * bio_list_on_stack[0] contains bios submitted by the current
+	 * make_request_fn.
+	 * bio_list_on_stack[1] contains bios that were submitted before
+	 * the current make_request_fn, but that haven't been processed
+	 * yet.
+	 */
+	struct bio_list bio_list_on_stack[2];
 	blk_qc_t ret = BLK_QC_T_NONE;

 	if (!generic_make_request_checks(bio))
@@ -1990,7 +1997,7 @@ blk_qc_t generic_make_request(struct bio *bio)
 	 * should be added at the tail
 	 */
 	if (current->bio_list) {
-		bio_list_add(current->bio_list, bio);
+		bio_list_add(&current->bio_list[0], bio);
 		goto out;
 	}

@@ -2009,18 +2016,17 @@ blk_qc_t generic_make_request(struct bio *bio)
 	 * bio_list, and call into ->make_request() again.
 	 */
 	BUG_ON(bio->bi_next);
-	bio_list_init(&bio_list_on_stack);
-	current->bio_list = &bio_list_on_stack;
+	bio_list_init(&bio_list_on_stack[0]);
+	current->bio_list = bio_list_on_stack;
 	do {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

 		if (likely(blk_queue_enter(q, false) == 0)) {
-			struct bio_list hold;
 			struct bio_list lower, same;

 			/* Create a fresh bio_list for all subordinate requests */
-			hold = bio_list_on_stack;
-			bio_list_init(&bio_list_on_stack);
+			bio_list_on_stack[1] = bio_list_on_stack[0];
+			bio_list_init(&bio_list_on_stack[0]);
 			ret = q->make_request_fn(q, bio);

 			blk_queue_exit(q);
@@ -2030,19 +2036,19 @@ blk_qc_t generic_make_request(struct bio *bio)
 			 */
 			bio_list_init(&lower);
 			bio_list_init(&same);
-			while ((bio = bio_list_pop(&bio_list_on_stack)) != NULL)
+			while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
 				if (q == bdev_get_queue(bio->bi_bdev))
 					bio_list_add(&same, bio);
 				else
 					bio_list_add(&lower, bio);
 			/* now assemble so we handle the lowest level first */
-			bio_list_merge(&bio_list_on_stack, &lower);
-			bio_list_merge(&bio_list_on_stack, &same);
-			bio_list_merge(&bio_list_on_stack, &hold);
+			bio_list_merge(&bio_list_on_stack[0], &lower);
+			bio_list_merge(&bio_list_on_stack[0], &same);
+			bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
 		} else {
 			bio_io_error(bio);
 		}
-		bio = bio_list_pop(current->bio_list);
+		bio = bio_list_pop(&bio_list_on_stack[0]);
 	} while (bio);
 	current->bio_list = NULL; /* deactivate */


+ 3 - 0
block/blk-mq-tag.c

@@ -295,6 +295,9 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		struct blk_mq_tags *tags = set->tags[i];

+		if (!tags)
+			continue;
+
 		for (j = 0; j < tags->nr_tags; j++) {
 			if (!tags->static_rqs[j])
 				continue;

+ 5 - 4
block/blk-mq.c

@@ -1434,7 +1434,8 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }

-static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
+static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
+				      bool may_sleep)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_queue_data bd = {
@@ -1475,7 +1476,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
 	}

 insert:
-	blk_mq_sched_insert_request(rq, false, true, true, false);
+	blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
 }

 /*
@@ -1569,11 +1570,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)

 		if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
 			rcu_read_lock();
-			blk_mq_try_issue_directly(old_rq, &cookie);
+			blk_mq_try_issue_directly(old_rq, &cookie, false);
 			rcu_read_unlock();
 		} else {
 			srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
-			blk_mq_try_issue_directly(old_rq, &cookie);
+			blk_mq_try_issue_directly(old_rq, &cookie, true);
 			srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
 		}
 		goto done;

+ 5 - 4
crypto/af_alg.c

@@ -266,7 +266,7 @@ unlock:
 	return err;
 }

-int af_alg_accept(struct sock *sk, struct socket *newsock)
+int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	const struct af_alg_type *type;
@@ -281,7 +281,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
 	if (!type)
 		goto unlock;

-	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, 0);
+	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
 	err = -ENOMEM;
 	if (!sk2)
 		goto unlock;
@@ -323,9 +323,10 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(af_alg_accept);

-static int alg_accept(struct socket *sock, struct socket *newsock, int flags)
+static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
+		      bool kern)
 {
-	return af_alg_accept(sock->sk, newsock);
+	return af_alg_accept(sock->sk, newsock, kern);
 }

 static const struct proto_ops alg_proto_ops = {

+ 5 - 4
crypto/algif_hash.c

@@ -239,7 +239,8 @@ unlock:
 	return err ?: len;
 }

-static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
+static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
+		       bool kern)
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
@@ -260,7 +261,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
 	if (err)
 		return err;

-	err = af_alg_accept(ask->parent, newsock);
+	err = af_alg_accept(ask->parent, newsock, kern);
 	if (err)
 		return err;

@@ -378,7 +379,7 @@ static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
 }

 static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
-			     int flags)
+			     int flags, bool kern)
 {
 	int err;

@@ -386,7 +387,7 @@ static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
 	if (err)
 		return err;

-	return hash_accept(sock, newsock, flags);
+	return hash_accept(sock, newsock, flags, kern);
 }

 static struct proto_ops algif_hash_ops_nokey = {

+ 42 - 15
drivers/acpi/acpi_processor.c

@@ -182,11 +182,6 @@ int __weak arch_register_cpu(int cpu)

 void __weak arch_unregister_cpu(int cpu) {}

-int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
-{
-	return -ENODEV;
-}
-
 static int acpi_processor_hotadd_init(struct acpi_processor *pr)
 {
 	unsigned long long sta;
@@ -285,6 +280,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
 		pr->acpi_id = value;
 	}

+	if (acpi_duplicate_processor_id(pr->acpi_id)) {
+		dev_err(&device->dev,
+			"Failed to get unique processor _UID (0x%x)\n",
+			pr->acpi_id);
+		return -ENODEV;
+	}
+
 	pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
 					pr->acpi_id);
 	if (invalid_phys_cpuid(pr->phys_id))
@@ -585,7 +587,7 @@ static struct acpi_scan_handler processor_container_handler = {
 static int nr_unique_ids __initdata;

 /* The number of the duplicate processor IDs */
-static int nr_duplicate_ids __initdata;
+static int nr_duplicate_ids;

 /* Used to store the unique processor IDs */
 static int unique_processor_ids[] __initdata = {
@@ -593,7 +595,7 @@ static int unique_processor_ids[] __initdata = {
 };

 /* Used to store the duplicate processor IDs */
-static int duplicate_processor_ids[] __initdata = {
+static int duplicate_processor_ids[] = {
 	[0 ... NR_CPUS - 1] = -1,
 };

@@ -638,28 +640,53 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
 						  void **rv)
 {
 	acpi_status status;
+	acpi_object_type acpi_type;
+	unsigned long long uid;
 	union acpi_object object = { 0 };
 	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };

-	status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+	status = acpi_get_type(handle, &acpi_type);
 	if (ACPI_FAILURE(status))
-		acpi_handle_info(handle, "Not get the processor object\n");
-	else
-		processor_validated_ids_update(object.processor.proc_id);
+		return false;
+
+	switch (acpi_type) {
+	case ACPI_TYPE_PROCESSOR:
+		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+		if (ACPI_FAILURE(status))
+			goto err;
+		uid = object.processor.proc_id;
+		break;
+
+	case ACPI_TYPE_DEVICE:
+		status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
+		if (ACPI_FAILURE(status))
+			goto err;
+		break;
+	default:
+		goto err;
+	}
+
+	processor_validated_ids_update(uid);
+	return true;
+
+err:
+	acpi_handle_info(handle, "Invalid processor object\n");
+	return false;

-	return AE_OK;
 }

-static void __init acpi_processor_check_duplicates(void)
+void __init acpi_processor_check_duplicates(void)
 {
-	/* Search all processor nodes in ACPI namespace */
+	/* check the correctness for all processors in ACPI namespace */
 	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
 						ACPI_UINT32_MAX,
 						acpi_processor_ids_walk,
 						NULL, NULL, NULL);
+	acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk,
+						NULL, NULL);
 }

-bool __init acpi_processor_validate_proc_id(int proc_id)
+bool acpi_duplicate_processor_id(int proc_id)
 {
 	int i;


+ 0 - 1
drivers/acpi/bus.c

@@ -1249,7 +1249,6 @@ static int __init acpi_init(void)
 	acpi_wakeup_device_init();
 	acpi_debugger_init();
 	acpi_setup_sb_notify_handler();
-	acpi_set_processor_mapping();
 	return 0;
 }


+ 22 - 111
drivers/acpi/processor_core.c

@@ -32,12 +32,12 @@ static struct acpi_table_madt *get_madt_table(void)
 }

 static int map_lapic_id(struct acpi_subtable_header *entry,
-		 u32 acpi_id, phys_cpuid_t *apic_id, bool ignore_disabled)
+		 u32 acpi_id, phys_cpuid_t *apic_id)
 {
 	struct acpi_madt_local_apic *lapic =
 		container_of(entry, struct acpi_madt_local_apic, header);

-	if (ignore_disabled && !(lapic->lapic_flags & ACPI_MADT_ENABLED))
+	if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
 		return -ENODEV;

 	if (lapic->processor_id != acpi_id)
@@ -48,13 +48,12 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
 }

 static int map_x2apic_id(struct acpi_subtable_header *entry,
-		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
-		bool ignore_disabled)
+		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
 {
 	struct acpi_madt_local_x2apic *apic =
 		container_of(entry, struct acpi_madt_local_x2apic, header);

-	if (ignore_disabled && !(apic->lapic_flags & ACPI_MADT_ENABLED))
+	if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
 		return -ENODEV;

 	if (device_declaration && (apic->uid == acpi_id)) {
@@ -66,13 +65,12 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
 }

 static int map_lsapic_id(struct acpi_subtable_header *entry,
-		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
-		bool ignore_disabled)
+		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
 {
 	struct acpi_madt_local_sapic *lsapic =
 		container_of(entry, struct acpi_madt_local_sapic, header);

-	if (ignore_disabled && !(lsapic->lapic_flags & ACPI_MADT_ENABLED))
+	if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
 		return -ENODEV;

 	if (device_declaration) {
@@ -89,13 +87,12 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
  * Retrieve the ARM CPU physical identifier (MPIDR)
  */
 static int map_gicc_mpidr(struct acpi_subtable_header *entry,
-		int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr,
-		bool ignore_disabled)
+		int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr)
 {
 	struct acpi_madt_generic_interrupt *gicc =
 	    container_of(entry, struct acpi_madt_generic_interrupt, header);

-	if (ignore_disabled && !(gicc->flags & ACPI_MADT_ENABLED))
+	if (!(gicc->flags & ACPI_MADT_ENABLED))
 		return -ENODEV;

 	/* device_declaration means Device object in DSDT, in the
@@ -112,7 +109,7 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry,
 }

 static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
-				   int type, u32 acpi_id, bool ignore_disabled)
+				   int type, u32 acpi_id)
 {
 	unsigned long madt_end, entry;
 	phys_cpuid_t phys_id = PHYS_CPUID_INVALID;	/* CPU hardware ID */
@@ -130,20 +127,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
 		struct acpi_subtable_header *header =
 			(struct acpi_subtable_header *)entry;
 		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
-			if (!map_lapic_id(header, acpi_id, &phys_id,
-					  ignore_disabled))
+			if (!map_lapic_id(header, acpi_id, &phys_id))
 				break;
 		} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
-			if (!map_x2apic_id(header, type, acpi_id, &phys_id,
-					   ignore_disabled))
+			if (!map_x2apic_id(header, type, acpi_id, &phys_id))
 				break;
 		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
-			if (!map_lsapic_id(header, type, acpi_id, &phys_id,
-					   ignore_disabled))
+			if (!map_lsapic_id(header, type, acpi_id, &phys_id))
 				break;
 		} else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
-			if (!map_gicc_mpidr(header, type, acpi_id, &phys_id,
-					    ignore_disabled))
+			if (!map_gicc_mpidr(header, type, acpi_id, &phys_id))
 				break;
 		}
 		entry += header->length;
@@ -161,15 +154,14 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
 	if (!madt)
 		return PHYS_CPUID_INVALID;

-	rv = map_madt_entry(madt, 1, acpi_id, true);
+	rv = map_madt_entry(madt, 1, acpi_id);

 	acpi_put_table((struct acpi_table_header *)madt);

 	return rv;
 }

-static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
-				  bool ignore_disabled)
+static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
@@ -190,38 +182,30 @@ static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,

 	header = (struct acpi_subtable_header *)obj->buffer.pointer;
 	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
-		map_lapic_id(header, acpi_id, &phys_id, ignore_disabled);
+		map_lapic_id(header, acpi_id, &phys_id);
 	else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
-		map_lsapic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+		map_lsapic_id(header, type, acpi_id, &phys_id);
 	else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
-		map_x2apic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+		map_x2apic_id(header, type, acpi_id, &phys_id);
 	else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
-		map_gicc_mpidr(header, type, acpi_id, &phys_id,
-			       ignore_disabled);
+		map_gicc_mpidr(header, type, acpi_id, &phys_id);

 exit:
 	kfree(buffer.pointer);
 	return phys_id;
 }

-static phys_cpuid_t __acpi_get_phys_id(acpi_handle handle, int type,
-				       u32 acpi_id, bool ignore_disabled)
+phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
 {
 	phys_cpuid_t phys_id;

-	phys_id = map_mat_entry(handle, type, acpi_id, ignore_disabled);
+	phys_id = map_mat_entry(handle, type, acpi_id);
 	if (invalid_phys_cpuid(phys_id))
-		phys_id = map_madt_entry(get_madt_table(), type, acpi_id,
-					   ignore_disabled);
+		phys_id = map_madt_entry(get_madt_table(), type, acpi_id);

 	return phys_id;
 }

-phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
-{
-	return __acpi_get_phys_id(handle, type, acpi_id, true);
-}
-
 int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
 {
 #ifdef CONFIG_SMP
@@ -278,79 +262,6 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
 }
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);

-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-static bool __init
-map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid)
-{
-	int type, id;
-	u32 acpi_id;
-	acpi_status status;
-	acpi_object_type acpi_type;
-	unsigned long long tmp;
-	union acpi_object object = { 0 };
-	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
-
-	status = acpi_get_type(handle, &acpi_type);
-	if (ACPI_FAILURE(status))
-		return false;
-
-	switch (acpi_type) {
-	case ACPI_TYPE_PROCESSOR:
-		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
-		if (ACPI_FAILURE(status))
-			return false;
-		acpi_id = object.processor.proc_id;
-
-		/* validate the acpi_id */
-		if(acpi_processor_validate_proc_id(acpi_id))
-			return false;
-		break;
-	case ACPI_TYPE_DEVICE:
-		status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
-		if (ACPI_FAILURE(status))
-			return false;
-		acpi_id = tmp;
-		break;
-	default:
-		return false;
-	}
-
-	type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
-
-	*phys_id = __acpi_get_phys_id(handle, type, acpi_id, false);
-	id = acpi_map_cpuid(*phys_id, acpi_id);
-
-	if (id < 0)
-		return false;
-	*cpuid = id;
-	return true;
-}
-
-static acpi_status __init
-set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context,
-			   void **rv)
-{
-	phys_cpuid_t phys_id;
-	int cpu_id;
-
-	if (!map_processor(handle, &phys_id, &cpu_id))
-		return AE_ERROR;
-
-	acpi_map_cpu2node(handle, cpu_id, phys_id);
-	return AE_OK;
-}
-
-void __init acpi_set_processor_mapping(void)
-{
-	/* Set persistent cpu <-> node mapping for all processors. */
-	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
-			    ACPI_UINT32_MAX, set_processor_node_mapping,
-			    NULL, NULL, NULL);
-}
-#else
-void __init acpi_set_processor_mapping(void) {}
-#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
 			 u64 *phys_addr, int *ioapic_id)

+ 4 - 2
drivers/ata/ahci_qoriq.c

@@ -177,7 +177,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
 	case AHCI_LS1043A:
 		if (!qpriv->ecc_addr)
 			return -EINVAL;
-		writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);
+		writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
+				qpriv->ecc_addr);
 		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
 		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
 		if (qpriv->is_dmacoherent)
@@ -194,7 +195,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
 	case AHCI_LS1046A:
 		if (!qpriv->ecc_addr)
 			return -EINVAL;
-		writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);
+		writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
+				qpriv->ecc_addr);
 		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
 		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
 		if (qpriv->is_dmacoherent)

+ 0 - 1
drivers/ata/libata-sff.c

@@ -1482,7 +1482,6 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 		break;

 	default:
-		WARN_ON_ONCE(1);
 		return AC_ERR_SYSTEM;
 	}


+ 3 - 6
drivers/ata/libata-transport.c

@@ -224,7 +224,6 @@ static DECLARE_TRANSPORT_CLASS(ata_port_class,

 static void ata_tport_release(struct device *dev)
 {
-	put_device(dev->parent);
 }

 /**
@@ -284,7 +283,7 @@ int ata_tport_add(struct device *parent,
 	device_initialize(dev);
 	dev->type = &ata_port_type;

-	dev->parent = get_device(parent);
+	dev->parent = parent;
 	dev->release = ata_tport_release;
 	dev_set_name(dev, "ata%d", ap->print_id);
 	transport_setup_device(dev);
@@ -348,7 +347,6 @@ static DECLARE_TRANSPORT_CLASS(ata_link_class,

 static void ata_tlink_release(struct device *dev)
 {
-	put_device(dev->parent);
 }

 /**
@@ -410,7 +408,7 @@ int ata_tlink_add(struct ata_link *link)
 	int error;

 	device_initialize(dev);
-	dev->parent = get_device(&ap->tdev);
+	dev->parent = &ap->tdev;
 	dev->release = ata_tlink_release;
 	if (ata_is_host_link(link))
 		dev_set_name(dev, "link%d", ap->print_id);
@@ -589,7 +587,6 @@ static DECLARE_TRANSPORT_CLASS(ata_dev_class,

 static void ata_tdev_release(struct device *dev)
 {
-	put_device(dev->parent);
 }

 /**
@@ -662,7 +659,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
 	int error;

 	device_initialize(dev);
-	dev->parent = get_device(&link->tdev);
+	dev->parent = &link->tdev;
 	dev->release = ata_tdev_release;
 	if (ata_is_host_link(link))
 		dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);

+ 0 - 5
drivers/base/core.c

@@ -639,11 +639,6 @@ int lock_device_hotplug_sysfs(void)
 	return restart_syscall();
 }

-void assert_held_device_hotplug(void)
-{
-	lockdep_assert_held(&device_hotplug_lock);
-}
-
 #ifdef CONFIG_BLOCK
 static inline int device_is_not_partition(struct device *dev)
 {

+ 14 - 2
drivers/char/hw_random/omap-rng.c

@@ -397,9 +397,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
 				irq, err);
 			return err;
 		}
-		omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);
 
-		priv->clk = of_clk_get(pdev->dev.of_node, 0);
+		priv->clk = devm_clk_get(&pdev->dev, NULL);
 		if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
 		if (!IS_ERR(priv->clk)) {
@@ -408,6 +407,19 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
 				dev_err(&pdev->dev, "unable to enable the clk, "
 						    "err = %d\n", err);
 		}
+
+		/*
+		 * On OMAP4, enabling the shutdown_oflo interrupt is
+		 * done in the interrupt mask register. There is no
+		 * such register on EIP76, and it's enabled by the
+		 * same bit in the control register
+		 */
+		if (priv->pdata->regs[RNG_INTMASK_REG])
+			omap_rng_write(priv, RNG_INTMASK_REG,
+				       RNG_SHUTDOWN_OFLO_MASK);
+		else
+			omap_rng_write(priv, RNG_CONTROL_REG,
+				       RNG_SHUTDOWN_OFLO_MASK);
 	}
 	return 0;
 }
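
The omap-rng hunk above keys the interrupt setup off priv->pdata->regs[RNG_INTMASK_REG]: a zero offset in the per-variant register table marks a register the IP simply does not have, so the same event is enabled through the control register instead. A minimal user-space sketch of that table-driven idiom; all names here (REG_INTMASK, write_reg(), the offset tables) are illustrative, not the driver's API:

    #include <stdio.h>
    #include <stdint.h>

    enum { REG_CONTROL, REG_INTMASK, REG_MAX };

    /* Per-variant offset table; 0 marks a register the variant lacks. */
    static const uint16_t omap4_regs[REG_MAX] = { [REG_CONTROL] = 0x14, [REG_INTMASK] = 0x18 };
    static const uint16_t eip76_regs[REG_MAX] = { [REG_CONTROL] = 0x14 }; /* no INTMASK */

    static void write_reg(const uint16_t *regs, int reg, uint32_t val)
    {
        printf("write 0x%x to offset 0x%x\n", (unsigned)val, (unsigned)regs[reg]);
    }

    static void enable_shutdown_oflo(const uint16_t *regs, uint32_t mask)
    {
        /* Prefer the dedicated mask register when the variant has one. */
        if (regs[REG_INTMASK])
            write_reg(regs, REG_INTMASK, mask);
        else
            write_reg(regs, REG_CONTROL, mask);
    }

    int main(void)
    {
        enable_shutdown_oflo(omap4_regs, 1u << 1);
        enable_shutdown_oflo(eip76_regs, 1u << 1);
        return 0;
    }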

+ 1 - 15
drivers/clocksource/tcb_clksrc.c

@@ -10,7 +10,6 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/atmel_tc.h>
-#include <linux/sched_clock.h>
 
 
 /*
@@ -57,14 +56,9 @@ static u64 tc_get_cycles(struct clocksource *cs)
 	return (upper << 16) | lower;
 }
 
-static u32 tc_get_cv32(void)
-{
-	return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
-}
-
 static u64 tc_get_cycles32(struct clocksource *cs)
 {
-	return tc_get_cv32();
+	return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
 }
 
 static struct clocksource clksrc = {
@@ -75,11 +69,6 @@ static struct clocksource clksrc = {
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static u64 notrace tc_read_sched_clock(void)
-{
-	return tc_get_cv32();
-}
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 
 struct tc_clkevt_device {
@@ -350,9 +339,6 @@ static int __init tcb_clksrc_init(void)
 		clksrc.read = tc_get_cycles32;
 		/* setup ony channel 0 */
 		tcb_setup_single_chan(tc, best_divisor_idx);
-
-		/* register sched_clock on chips with single 32 bit counter */
-		sched_clock_register(tc_read_sched_clock, 32, divided_rate);
 	} else {
 		/* tclib will give us three clocks no matter what the
 		 * underlying platform supports.

+ 5 - 3
drivers/cpufreq/cpufreq.c

@@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
 					char *buf)
 {
 	unsigned int cur_freq = __cpufreq_get(policy);
-	if (!cur_freq)
-		return sprintf(buf, "<unknown>");
-	return sprintf(buf, "%u\n", cur_freq);
+
+	if (cur_freq)
+		return sprintf(buf, "%u\n", cur_freq);
+
+	return sprintf(buf, "<unknown>\n");
 }
 
 /**

+ 31 - 33
drivers/cpufreq/intel_pstate.c

@@ -84,6 +84,11 @@ static inline u64 div_ext_fp(u64 x, u64 y)
 	return div64_u64(x << EXT_FRAC_BITS, y);
 }
 
+static inline int32_t percent_ext_fp(int percent)
+{
+	return div_ext_fp(percent, 100);
+}
+
 /**
  * struct sample -	Store performance sample
  * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
@@ -845,12 +850,11 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
 
 static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
 {
-	int min, hw_min, max, hw_max, cpu, range, adj_range;
+	int min, hw_min, max, hw_max, cpu;
 	struct perf_limits *perf_limits = limits;
 	u64 value, cap;
 
 	for_each_cpu(cpu, policy->cpus) {
-		int max_perf_pct, min_perf_pct;
 		struct cpudata *cpu_data = all_cpu_data[cpu];
 		s16 epp;
 
@@ -863,20 +867,15 @@
 			hw_max = HWP_GUARANTEED_PERF(cap);
 		else
 			hw_max = HWP_HIGHEST_PERF(cap);
-		range = hw_max - hw_min;
 
-		max_perf_pct = perf_limits->max_perf_pct;
-		min_perf_pct = perf_limits->min_perf_pct;
+		min = fp_ext_toint(hw_max * perf_limits->min_perf);
 
 		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
-		adj_range = min_perf_pct * range / 100;
-		min = hw_min + adj_range;
+
 		value &= ~HWP_MIN_PERF(~0L);
 		value |= HWP_MIN_PERF(min);
 
-		adj_range = max_perf_pct * range / 100;
-		max = hw_min + adj_range;
-
+		max = fp_ext_toint(hw_max * perf_limits->max_perf);
 		value &= ~HWP_MAX_PERF(~0L);
 		value |= HWP_MAX_PERF(max);
 
@@ -989,6 +988,7 @@ static void intel_pstate_update_policies(void)
 static int pid_param_set(void *data, u64 val)
 {
 	*(u32 *)data = val;
+	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
 	intel_pstate_reset_all_pid();
 	return 0;
 }
@@ -1225,7 +1225,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 				   limits->max_perf_pct);
 	limits->max_perf_pct = max(limits->min_perf_pct,
 				   limits->max_perf_pct);
-	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
+	limits->max_perf = percent_ext_fp(limits->max_perf_pct);
 
 	intel_pstate_update_policies();
 
@@ -1262,7 +1262,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 				   limits->min_perf_pct);
 	limits->min_perf_pct = min(limits->max_perf_pct,
 				   limits->min_perf_pct);
-	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
+	limits->min_perf = percent_ext_fp(limits->min_perf_pct);
 
 	intel_pstate_update_policies();
 
@@ -2080,36 +2080,34 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
 static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 					    struct perf_limits *limits)
 {
+	int32_t max_policy_perf, min_policy_perf;
 
-	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
-					      policy->cpuinfo.max_freq);
-	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
+	max_policy_perf = div_ext_fp(policy->max, policy->cpuinfo.max_freq);
+	max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
 	if (policy->max == policy->min) {
-		limits->min_policy_pct = limits->max_policy_pct;
+		min_policy_perf = max_policy_perf;
 	} else {
-		limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
-						      policy->cpuinfo.max_freq);
-		limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
-						 0, 100);
+		min_policy_perf = div_ext_fp(policy->min,
+					     policy->cpuinfo.max_freq);
+		min_policy_perf = clamp_t(int32_t, min_policy_perf,
+					  0, max_policy_perf);
 	}
 
-	/* Normalize user input to [min_policy_pct, max_policy_pct] */
-	limits->min_perf_pct = max(limits->min_policy_pct,
-				   limits->min_sysfs_pct);
-	limits->min_perf_pct = min(limits->max_policy_pct,
-				   limits->min_perf_pct);
-	limits->max_perf_pct = min(limits->max_policy_pct,
-				   limits->max_sysfs_pct);
-	limits->max_perf_pct = max(limits->min_policy_pct,
-				   limits->max_perf_pct);
+	/* Normalize user input to [min_perf, max_perf] */
+	limits->min_perf = max(min_policy_perf,
+			       percent_ext_fp(limits->min_sysfs_pct));
+	limits->min_perf = min(limits->min_perf, max_policy_perf);
+	limits->max_perf = min(max_policy_perf,
+			       percent_ext_fp(limits->max_sysfs_pct));
+	limits->max_perf = max(min_policy_perf, limits->max_perf);
 
-	/* Make sure min_perf_pct <= max_perf_pct */
-	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
+	/* Make sure min_perf <= max_perf */
+	limits->min_perf = min(limits->min_perf, limits->max_perf);
 
-	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
-	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
 	limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
 	limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);
+	limits->max_perf_pct = fp_ext_toint(limits->max_perf * 100);
+	limits->min_perf_pct = fp_ext_toint(limits->min_perf * 100);
 
 	pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
 		 limits->max_perf_pct, limits->min_perf_pct);
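
The intel_pstate rework above moves the limit bookkeeping from whole percentages to an extended fixed-point fraction, scaling the hardware range directly instead of going through `hw_min + pct * range / 100`. The round trip can be sketched in isolation; EXT_FRAC_BITS = 8 is an assumption for illustration only, and percent_ext_fp()/fp_ext_toint() are reimplemented here, not taken from the driver:

    #include <stdio.h>
    #include <stdint.h>

    #define EXT_FRAC_BITS 8  /* assumed fractional width for this sketch */

    static int32_t percent_ext_fp(int percent)
    {
        /* percent/100 as a Q(EXT_FRAC_BITS) fixed-point fraction */
        return (int32_t)(((int64_t)percent << EXT_FRAC_BITS) / 100);
    }

    static int fp_ext_toint(int64_t fp)
    {
        return (int)(fp >> EXT_FRAC_BITS);
    }

    int main(void)
    {
        int32_t min_perf = percent_ext_fp(25);  /* 25% -> 0.25 in fixed point */
        int hw_max = 32;                        /* highest HWP perf level */

        /* scale the hardware range directly, as the patched hwp_set() does */
        printf("min HWP level = %d\n", fp_ext_toint((int64_t)hw_max * min_perf));
        return 0;
    }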

+ 85 - 47
drivers/crypto/s5p-sss.c

@@ -270,7 +270,7 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
 	scatterwalk_done(&walk, out, 0);
 }
 
-static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+static void s5p_sg_done(struct s5p_aes_dev *dev)
 {
 	if (dev->sg_dst_cpy) {
 		dev_dbg(dev->dev,
@@ -281,8 +281,11 @@
 	}
 	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
 	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+}
 
-	/* holding a lock outside */
+/* Calls the completion. Cannot be called with dev->lock hold. */
+static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+{
 	dev->req->base.complete(&dev->req->base, err);
 	dev->busy = false;
 }
@@ -368,51 +371,44 @@ exit:
 }
 
 /*
- * Returns true if new transmitting (output) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_outdata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new transmitting (output) data is ready and its address+length
+ *     have to be written to device (by calling s5p_set_dma_outdata()).
 */
-static bool s5p_aes_tx(struct s5p_aes_dev *dev)
+static int s5p_aes_tx(struct s5p_aes_dev *dev)
 {
-	int err = 0;
-	bool ret = false;
+	int ret = 0;
 
 	s5p_unset_outdata(dev);
 
 	if (!sg_is_last(dev->sg_dst)) {
-		err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
-		if (err)
-			s5p_aes_complete(dev, err);
-		else
-			ret = true;
-	} else {
-		s5p_aes_complete(dev, err);
-
-		dev->busy = true;
-		tasklet_schedule(&dev->tasklet);
+		ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
+		if (!ret)
+			ret = 1;
 	}
 
 	return ret;
 }
 
 /*
- * Returns true if new receiving (input) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_indata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new receiving (input) data is ready and its address+length
+ *     have to be written to device (by calling s5p_set_dma_indata()).
 */
-static bool s5p_aes_rx(struct s5p_aes_dev *dev)
+static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
 {
-	int err;
-	bool ret = false;
+	int ret = 0;
 
 	s5p_unset_indata(dev);
 
 	if (!sg_is_last(dev->sg_src)) {
-		err = s5p_set_indata(dev, sg_next(dev->sg_src));
-		if (err)
-			s5p_aes_complete(dev, err);
-		else
-			ret = true;
+		ret = s5p_set_indata(dev, sg_next(dev->sg_src));
+		if (!ret)
+			ret = 1;
	}
 
 	return ret;
@@ -422,33 +418,73 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
 {
 	struct platform_device *pdev = dev_id;
 	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
-	bool set_dma_tx = false;
-	bool set_dma_rx = false;
+	int err_dma_tx = 0;
+	int err_dma_rx = 0;
+	bool tx_end = false;
 	unsigned long flags;
 	uint32_t status;
+	int err;
 
 	spin_lock_irqsave(&dev->lock, flags);
 
+	/*
+	 * Handle rx or tx interrupt. If there is still data (scatterlist did not
+	 * reach end), then map next scatterlist entry.
+	 * In case of such mapping error, s5p_aes_complete() should be called.
+	 *
+	 * If there is no more data in tx scatter list, call s5p_aes_complete()
+	 * and schedule new tasklet.
+	 */
 	status = SSS_READ(dev, FCINTSTAT);
 	if (status & SSS_FCINTSTAT_BRDMAINT)
-		set_dma_rx = s5p_aes_rx(dev);
-	if (status & SSS_FCINTSTAT_BTDMAINT)
-		set_dma_tx = s5p_aes_tx(dev);
+		err_dma_rx = s5p_aes_rx(dev);
+
+	if (status & SSS_FCINTSTAT_BTDMAINT) {
+		if (sg_is_last(dev->sg_dst))
+			tx_end = true;
+		err_dma_tx = s5p_aes_tx(dev);
+	}
 
 	SSS_WRITE(dev, FCINTPEND, status);
 
-	/*
-	 * Writing length of DMA block (either receiving or transmitting)
-	 * will start the operation immediately, so this should be done
-	 * at the end (even after clearing pending interrupts to not miss the
-	 * interrupt).
-	 */
-	if (set_dma_tx)
-		s5p_set_dma_outdata(dev, dev->sg_dst);
-	if (set_dma_rx)
-		s5p_set_dma_indata(dev, dev->sg_src);
+	if (err_dma_rx < 0) {
+		err = err_dma_rx;
+		goto error;
+	}
+	if (err_dma_tx < 0) {
+		err = err_dma_tx;
+		goto error;
+	}
+
+	if (tx_end) {
+		s5p_sg_done(dev);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		s5p_aes_complete(dev, 0);
+		dev->busy = true;
+		tasklet_schedule(&dev->tasklet);
+	} else {
+		/*
+		 * Writing length of DMA block (either receiving or
+		 * transmitting) will start the operation immediately, so this
+		 * should be done at the end (even after clearing pending
+		 * interrupts to not miss the interrupt).
+		 */
+		if (err_dma_tx == 1)
+			s5p_set_dma_outdata(dev, dev->sg_dst);
+		if (err_dma_rx == 1)
+			s5p_set_dma_indata(dev, dev->sg_src);
 
+		spin_unlock_irqrestore(&dev->lock, flags);
+	}
+
+	return IRQ_HANDLED;
+
+error:
+	s5p_sg_done(dev);
 	spin_unlock_irqrestore(&dev->lock, flags);
+	s5p_aes_complete(dev, err);
 
 	return IRQ_HANDLED;
 }
@@ -597,8 +633,9 @@ outdata_error:
 	s5p_unset_indata(dev);
 
 indata_error:
-	s5p_aes_complete(dev, err);
+	s5p_sg_done(dev);
 	spin_unlock_irqrestore(&dev->lock, flags);
+	s5p_aes_complete(dev, err);
 }
 
 static void s5p_tasklet_cb(unsigned long data)
@@ -805,8 +842,9 @@ static int s5p_aes_probe(struct platform_device *pdev)
 		dev_warn(dev, "feed control interrupt is not available.\n");
 		goto err_irq;
 	}
-	err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
-			       IRQF_SHARED, pdev->name, pdev);
+	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
+					s5p_aes_interrupt, IRQF_ONESHOT,
+					pdev->name, pdev);
 	if (err < 0) {
 		dev_warn(dev, "feed control interrupt is not available.\n");
 		goto err_irq;
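
The recurring shape of the s5p-sss fix: free and unmap under dev->lock, but invoke the crypto completion only after dropping it, because ->complete() may re-enter the driver and take the lock again. A minimal user-space sketch of that ordering, with placeholder names (lock, complete(), irq_path()) standing in for the driver's:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void complete(int err)       /* analogue of s5p_aes_complete() */
    {
        /* A completion callback may re-enter and take `lock` again;
         * calling it with `lock` held would self-deadlock. */
        printf("request finished, err=%d\n", err);
    }

    static void irq_path(int err)
    {
        pthread_mutex_lock(&lock);
        /* ... update scatterlists / free bounce buffers under the lock ... */
        pthread_mutex_unlock(&lock);
        complete(err);                  /* only after the lock is dropped */
    }

    int main(void)
    {
        irq_path(0);
        return 0;
    }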

+ 30 - 3
drivers/dax/dax.c

@@ -427,6 +427,7 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 	int rc = VM_FAULT_SIGBUS;
 	phys_addr_t phys;
 	pfn_t pfn;
+	unsigned int fault_size = PAGE_SIZE;
 
 	if (check_vma(dax_dev, vmf->vma, __func__))
 		return VM_FAULT_SIGBUS;
@@ -437,9 +438,12 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 	}
 
+	if (fault_size != dax_region->align)
+		return VM_FAULT_SIGBUS;
+
 	phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
 	if (phys == -1) {
-		dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
 				vmf->pgoff);
 		return VM_FAULT_SIGBUS;
 	}
@@ -464,6 +468,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 	phys_addr_t phys;
 	pgoff_t pgoff;
 	pfn_t pfn;
+	unsigned int fault_size = PMD_SIZE;
 
 	if (check_vma(dax_dev, vmf->vma, __func__))
 		return VM_FAULT_SIGBUS;
@@ -480,10 +485,20 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 	}
 
+	if (fault_size < dax_region->align)
+		return VM_FAULT_SIGBUS;
+	else if (fault_size > dax_region->align)
+		return VM_FAULT_FALLBACK;
+
+	/* if we are outside of the VMA */
+	if (pmd_addr < vmf->vma->vm_start ||
+			(pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
+		return VM_FAULT_SIGBUS;
+
 	pgoff = linear_page_index(vmf->vma, pmd_addr);
 	phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
 	if (phys == -1) {
-		dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
 				pgoff);
 		return VM_FAULT_SIGBUS;
 	}
@@ -503,6 +518,8 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 	phys_addr_t phys;
 	pgoff_t pgoff;
 	pfn_t pfn;
+	unsigned int fault_size = PUD_SIZE;
+
 
 	if (check_vma(dax_dev, vmf->vma, __func__))
 		return VM_FAULT_SIGBUS;
@@ -519,10 +536,20 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 	}
 
+	if (fault_size < dax_region->align)
+		return VM_FAULT_SIGBUS;
+	else if (fault_size > dax_region->align)
+		return VM_FAULT_FALLBACK;
+
+	/* if we are outside of the VMA */
+	if (pud_addr < vmf->vma->vm_start ||
+			(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
+		return VM_FAULT_SIGBUS;
+
 	pgoff = linear_page_index(vmf->vma, pud_addr);
 	phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE);
 	if (phys == -1) {
-		dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
 				pgoff);
 		return VM_FAULT_SIGBUS;
 	}
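
The PMD/PUD fault paths above compare the fault size against the device-dax region alignment: too small means the device cannot satisfy the mapping (SIGBUS), too large means the core should retry with a smaller mapping (FALLBACK). A reduced sketch of that policy; the FAULT_* values are stand-ins for illustration, not the kernel's VM_FAULT_* codes:

    #include <stdio.h>

    #define PAGE_SIZE (4096UL)
    #define PMD_SIZE  (2UL * 1024 * 1024)

    enum { FAULT_OK, FAULT_SIGBUS, FAULT_FALLBACK };

    /* Mirrors the PMD-path policy (the PTE path simply requires equality). */
    static int check_fault_size(unsigned long fault_size, unsigned long align)
    {
        if (fault_size < align)
            return FAULT_SIGBUS;     /* device can't satisfy sub-alignment faults */
        if (fault_size > align)
            return FAULT_FALLBACK;   /* let the core retry with a smaller size */
        return FAULT_OK;
    }

    int main(void)
    {
        printf("%d\n", check_fault_size(PAGE_SIZE, PMD_SIZE)); /* SIGBUS */
        printf("%d\n", check_fault_size(PMD_SIZE, PMD_SIZE));  /* OK */
        printf("%d\n", check_fault_size(PMD_SIZE, PAGE_SIZE)); /* FALLBACK */
        return 0;
    }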

+ 0 - 2
drivers/gpu/drm/amd/acp/Makefile

@@ -3,6 +3,4 @@
 # of AMDSOC/AMDGPU drm driver.
 # It provides the HW control for ACP related functionalities.
 
-subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include
-
 AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o

+ 2 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -240,6 +240,8 @@ free_partial_kdata:
 	for (; i >= 0; i--)
 		drm_free_large(p->chunks[i].kdata);
 	kfree(p->chunks);
+	p->chunks = NULL;
+	p->nchunks = 0;
 put_ctx:
 	amdgpu_ctx_put(p->ctx);
 free_chunk:
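
The amdgpu_cs hunk is the classic freed-pointer reset: after the error-path kfree(), the pointer and count are cleared so a later unwind pass cannot free twice or index freed memory. A minimal sketch of the idiom (struct parser and free_chunks() are illustrative names):

    #include <stdlib.h>

    struct parser { void **chunks; unsigned nchunks; };

    /* After freeing, clear the pointer and count so a second cleanup
     * pass is a harmless no-op instead of a double free. */
    static void free_chunks(struct parser *p)
    {
        for (unsigned i = 0; i < p->nchunks; i++)
            free(p->chunks[i]);
        free(p->chunks);
        p->chunks = NULL;
        p->nchunks = 0;
    }

    int main(void)
    {
        struct parser p = { .chunks = NULL, .nchunks = 0 };
        free_chunks(&p);   /* idempotent: safe to call repeatedly */
        free_chunks(&p);
        return 0;
    }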

+ 2 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -2590,7 +2590,7 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 		use_bank = 0;
 	}
 
-	*pos &= 0x3FFFF;
+	*pos &= (1UL << 22) - 1;
 
 	if (use_bank) {
 		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
@@ -2666,7 +2666,7 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 		use_bank = 0;
 	}
 
-	*pos &= 0x3FFFF;
+	*pos &= (1UL << 22) - 1;
 
 	if (use_bank) {
 		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||

+ 6 - 0
drivers/gpu/drm/amd/amdgpu/si_dpm.c

@@ -3464,6 +3464,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 		    (adev->pdev->device == 0x6667)) {
 			max_sclk = 75000;
 		}
+	} else if (adev->asic_type == CHIP_OLAND) {
+		if ((adev->pdev->device == 0x6604) &&
+		    (adev->pdev->subsystem_vendor == 0x1028) &&
+		    (adev->pdev->subsystem_device == 0x066F)) {
+			max_sclk = 75000;
+		}
 	}
 
 	if (rps->vce_active) {

+ 1 - 1
drivers/gpu/drm/amd/amdgpu/vi.c

@@ -1051,7 +1051,7 @@ static int vi_common_early_init(void *handle)
 		/* rev0 hardware requires workarounds to support PG */
 		adev->pg_flags = 0;
 		if (adev->rev_id != 0x00) {
-			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+			adev->pg_flags |=
 				AMD_PG_SUPPORT_GFX_SMG |
 				AMD_PG_SUPPORT_GFX_PIPELINE |
 				AMD_PG_SUPPORT_CP |

+ 1 - 1
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c

@@ -178,7 +178,7 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 	if (bgate) {
 		cgs_set_powergating_state(hwmgr->device,
 						AMD_IP_BLOCK_TYPE_VCE,
-						AMD_PG_STATE_UNGATE);
+						AMD_PG_STATE_GATE);
 		cgs_set_clockgating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_VCE,
 				AMD_CG_STATE_GATE);

+ 1 - 2
drivers/gpu/drm/arm/malidp_crtc.c

@@ -63,8 +63,7 @@ static void malidp_crtc_enable(struct drm_crtc *crtc)
 
 	clk_prepare_enable(hwdev->pxlclk);
 
-	/* mclk needs to be set to the same or higher rate than pxlclk */
-	clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000);
+	/* We rely on firmware to set mclk to a sensible level. */
 	clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
 
 	hwdev->modeset(hwdev, &vm);

+ 1 - 1
drivers/gpu/drm/arm/malidp_hw.c

@@ -83,7 +83,7 @@ static const struct malidp_layer malidp550_layers[] = {
 	{ DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
 	{ DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE },
 	{ DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
-	{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 },
+	{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE },
 };
 
 #define MALIDP_DE_DEFAULT_PREFETCH_START	5

+ 16 - 2
drivers/gpu/drm/arm/malidp_planes.c

@@ -37,6 +37,8 @@
 #define   LAYER_V_VAL(x)		(((x) & 0x1fff) << 16)
 #define MALIDP_LAYER_COMP_SIZE		0x010
 #define MALIDP_LAYER_OFFSET		0x014
+#define MALIDP550_LS_ENABLE		0x01c
+#define MALIDP550_LS_R1_IN_SIZE		0x020
 
 /*
  * This 4-entry look-up-table is used to determine the full 8-bit alpha value
@@ -242,6 +244,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
 			LAYER_V_VAL(plane->state->crtc_y),
 			mp->layer->base + MALIDP_LAYER_OFFSET);
 
+	if (mp->layer->id == DE_SMART)
+		malidp_hw_write(mp->hwdev,
+				LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
+				mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
+
 	/* first clear the rotation bits */
 	val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
 	val &= ~LAYER_ROT_MASK;
@@ -330,9 +337,16 @@ int malidp_de_planes_init(struct drm_device *drm)
 		plane->hwdev = malidp->dev;
 		plane->layer = &map->layers[i];
 
-		/* Skip the features which the SMART layer doesn't have */
-		if (id == DE_SMART)
+		if (id == DE_SMART) {
+			/*
+			 * Enable the first rectangle in the SMART layer to be
+			 * able to use it as a drm plane.
+			 */
+			malidp_hw_write(malidp->dev, 1,
+					plane->layer->base + MALIDP550_LS_ENABLE);
+			/* Skip the features which the SMART layer doesn't have. */
 			continue;
+		}
 
 		drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags);
 		malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,

+ 1 - 0
drivers/gpu/drm/arm/malidp_regs.h

@@ -84,6 +84,7 @@
 /* Stride register offsets relative to Lx_BASE */
 #define MALIDP_DE_LG_STRIDE		0x18
 #define MALIDP_DE_LV_STRIDE0		0x18
+#define MALIDP550_DE_LS_R1_STRIDE	0x28
 
 /* macros to set values into registers */
 #define MALIDP_DE_H_FRONTPORCH(x)	(((x) & 0xfff) << 0)

+ 1 - 0
drivers/gpu/drm/i915/i915_drv.h

@@ -293,6 +293,7 @@ enum plane_id {
 	PLANE_PRIMARY,
 	PLANE_SPRITE0,
 	PLANE_SPRITE1,
+	PLANE_SPRITE2,
 	PLANE_CURSOR,
 	I915_MAX_PLANES,
 };

+ 94 - 3
drivers/gpu/drm/i915/i915_gem.c

@@ -1434,6 +1434,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 
 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
+	ret = -ENODEV;
+	if (obj->ops->pwrite)
+		ret = obj->ops->pwrite(obj, args);
+	if (ret != -ENODEV)
+		goto err;
+
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
 				   I915_WAIT_ALL,
@@ -2119,6 +2125,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 	 */
 	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
 	obj->mm.madv = __I915_MADV_PURGED;
+	obj->mm.pages = ERR_PTR(-EFAULT);
 }
 
 /* Try to discard unwanted pages */
@@ -2218,7 +2225,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 
 	__i915_gem_object_reset_page_iter(obj);
 
-	obj->ops->put_pages(obj, pages);
+	if (!IS_ERR(pages))
+		obj->ops->put_pages(obj, pages);
+
 unlock:
 	mutex_unlock(&obj->mm.lock);
 }
@@ -2437,7 +2446,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	if (err)
 		return err;
 
-	if (unlikely(!obj->mm.pages)) {
+	if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
 		err = ____i915_gem_object_get_pages(obj);
 		if (err)
 			goto unlock;
@@ -2515,7 +2524,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 
 	pinned = true;
 	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-		if (unlikely(!obj->mm.pages)) {
+		if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
 			ret = ____i915_gem_object_get_pages(obj);
 			if (ret)
 				goto err_unlock;
@@ -2563,6 +2572,75 @@ err_unlock:
 	goto out_unlock;
 }
 
+static int
+i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
+			   const struct drm_i915_gem_pwrite *arg)
+{
+	struct address_space *mapping = obj->base.filp->f_mapping;
+	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
+	u64 remain, offset;
+	unsigned int pg;
+
+	/* Before we instantiate/pin the backing store for our use, we
+	 * can prepopulate the shmemfs filp efficiently using a write into
+	 * the pagecache. We avoid the penalty of instantiating all the
+	 * pages, important if the user is just writing to a few and never
+	 * uses the object on the GPU, and using a direct write into shmemfs
+	 * allows it to avoid the cost of retrieving a page (either swapin
+	 * or clearing-before-use) before it is overwritten.
+	 */
+	if (READ_ONCE(obj->mm.pages))
+		return -ENODEV;
+
+	/* Before the pages are instantiated the object is treated as being
+	 * in the CPU domain. The pages will be clflushed as required before
+	 * use, and we can freely write into the pages directly. If userspace
+	 * races pwrite with any other operation; corruption will ensue -
+	 * that is userspace's prerogative!
+	 */
+
+	remain = arg->size;
+	offset = arg->offset;
+	pg = offset_in_page(offset);
+
+	do {
+		unsigned int len, unwritten;
+		struct page *page;
+		void *data, *vaddr;
+		int err;
+
+		len = PAGE_SIZE - pg;
+		if (len > remain)
+			len = remain;
+
+		err = pagecache_write_begin(obj->base.filp, mapping,
+					    offset, len, 0,
+					    &page, &data);
+		if (err < 0)
+			return err;
+
+		vaddr = kmap(page);
+		unwritten = copy_from_user(vaddr + pg, user_data, len);
+		kunmap(page);
+
+		err = pagecache_write_end(obj->base.filp, mapping,
+					  offset, len, len - unwritten,
+					  page, data);
+		if (err < 0)
+			return err;
+
+		if (unwritten)
+			return -EFAULT;
+
+		remain -= len;
+		user_data += len;
+		offset += len;
+		pg = 0;
+	} while (remain);
+
+	return 0;
+}
+
 static bool ban_context(const struct i915_gem_context *ctx)
 {
 	return (i915_gem_context_is_bannable(ctx) &&
@@ -3029,6 +3107,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
 		if (args->timeout_ns < 0)
 			args->timeout_ns = 0;
+
+		/*
+		 * Apparently ktime isn't accurate enough and occasionally has a
+		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+		 * things up to make the test happy. We allow up to 1 jiffy.
+		 *
+		 * This is a regression from the timespec->ktime conversion.
+		 */
+		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
+			args->timeout_ns = 0;
 	}
 
 	i915_gem_object_put(obj);
@@ -3974,8 +4062,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
 	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 		 I915_GEM_OBJECT_IS_SHRINKABLE,
+
 	.get_pages = i915_gem_object_get_pages_gtt,
 	.put_pages = i915_gem_object_put_pages_gtt,
+
+	.pwrite = i915_gem_object_pwrite_gtt,
 };
 
 struct drm_i915_gem_object *
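
The core of i915_gem_object_pwrite_gtt() is a chunked copy: walk the destination in page-sized pieces, where only the first piece may start mid-page. A user-space analogue of that loop, with a plain memcpy standing in for the pagecache_write_begin()/_end() bracketing (buffer names and sizes are invented for the sketch):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define PAGE_SZ 4096u

    static int chunked_write(uint8_t *backing, uint64_t offset,
                             const uint8_t *src, uint64_t size)
    {
        uint64_t remain = size;
        unsigned pg = (unsigned)(offset % PAGE_SZ); /* offset within first page */

        do {
            unsigned len = PAGE_SZ - pg;
            if (len > remain)
                len = (unsigned)remain;

            /* the real code wraps this copy in pagecache_write_begin/_end */
            memcpy(backing + offset, src, len);

            remain -= len;
            src    += len;
            offset += len;
            pg = 0;                     /* later chunks are page-aligned */
        } while (remain);
        return 0;
    }

    int main(void)
    {
        static uint8_t store[4 * PAGE_SZ], data[PAGE_SZ + 123];
        memset(data, 0xab, sizeof(data));
        chunked_write(store, 300, data, sizeof(data)); /* crosses a page edge */
        printf("%02x\n", store[300]);
        return 0;
    }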

+ 4 - 4
drivers/gpu/drm/i915/i915_gem_evict.c

@@ -293,12 +293,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 		 * those as well to make room for our guard pages.
 		 */
 		if (check_color) {
-			if (vma->node.start + vma->node.size == node->start) {
-				if (vma->node.color == node->color)
+			if (node->start + node->size == target->start) {
+				if (node->color == target->color)
 					continue;
 			}
-			if (vma->node.start == node->start + node->size) {
-				if (vma->node.color == node->color)
+			if (node->start == target->start + target->size) {
+				if (node->color == target->color)
 					continue;
 			}
 		}

+ 3 - 0
drivers/gpu/drm/i915/i915_gem_object.h

@@ -54,6 +54,9 @@ struct drm_i915_gem_object_ops {
 	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
 	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
 
+	int (*pwrite)(struct drm_i915_gem_object *,
+		      const struct drm_i915_gem_pwrite *);
+
 	int (*dmabuf_export)(struct drm_i915_gem_object *);
 	void (*release)(struct drm_i915_gem_object *);
 };

+ 37 - 20
drivers/gpu/drm/i915/i915_vma.c

@@ -512,10 +512,36 @@ err_unpin:
 	return ret;
 }
 
+static void
+i915_vma_remove(struct i915_vma *vma)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+
+	drm_mm_remove_node(&vma->node);
+	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+	/* Since the unbound list is global, only move to that list if
+	 * no more VMAs exist.
+	 */
+	if (--obj->bind_count == 0)
+		list_move_tail(&obj->global_link,
+			       &to_i915(obj->base.dev)->mm.unbound_list);
+
+	/* And finally now the object is completely decoupled from this vma,
+	 * we can drop its hold on the backing storage and allow it to be
+	 * reaped by the shrinker.
+	 */
+	i915_gem_object_unpin_pages(obj);
+	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+}
+
 int __i915_vma_do_pin(struct i915_vma *vma,
 		      u64 size, u64 alignment, u64 flags)
 {
-	unsigned int bound = vma->flags;
+	const unsigned int bound = vma->flags;
 	int ret;
 
 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
@@ -524,18 +550,18 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 
 	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
 		ret = -EBUSY;
-		goto err;
+		goto err_unpin;
 	}
 
 	if ((bound & I915_VMA_BIND_MASK) == 0) {
 		ret = i915_vma_insert(vma, size, alignment, flags);
 		if (ret)
-			goto err;
+			goto err_unpin;
 	}
 
 	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
 	if (ret)
-		goto err;
+		goto err_remove;
 
 	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
 		__i915_vma_set_map_and_fenceable(vma);
@@ -544,7 +570,12 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
 	return 0;
 
-err:
+err_remove:
+	if ((bound & I915_VMA_BIND_MASK) == 0) {
+		GEM_BUG_ON(vma->pages);
+		i915_vma_remove(vma);
+	}
+err_unpin:
 	__i915_vma_unpin(vma);
 	return ret;
 }
@@ -657,9 +688,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 	}
 	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
-	drm_mm_remove_node(&vma->node);
-	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
-
 	if (vma->pages != obj->mm.pages) {
 		GEM_BUG_ON(!vma->pages);
 		sg_free_table(vma->pages);
@@ -667,18 +695,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 	}
 	vma->pages = NULL;
 
-	/* Since the unbound list is global, only move to that list if
-	 * no more VMAs exist. */
-	if (--obj->bind_count == 0)
-		list_move_tail(&obj->global_link,
-			       &to_i915(obj->base.dev)->mm.unbound_list);
-
-	/* And finally now the object is completely decoupled from this vma,
-	 * we can drop its hold on the backing storage and allow it to be
-	 * reaped by the shrinker.
-	 */
-	i915_gem_object_unpin_pages(obj);
-	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+	i915_vma_remove(vma);
 
 destroy:
 	if (unlikely(i915_vma_is_closed(vma)))
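
The unwind shape in __i915_vma_do_pin() above: snapshot the pre-call state (`bound`), and on failure undo only the step this call actually performed. A reduced sketch of that pattern under invented names (struct vma, do_pin(), the helpers are all illustrative):

    #include <stdio.h>
    #include <stdbool.h>

    struct vma { bool inserted; };

    static int do_insert(struct vma *v)  { v->inserted = true; return 0; }
    static int do_bind(struct vma *v)    { (void)v; return -1; /* simulate failure */ }
    static void do_remove(struct vma *v) { v->inserted = false; }

    static int do_pin(struct vma *v)
    {
        const bool was_bound = v->inserted;  /* snapshot before mutating */
        int ret = 0;

        if (!was_bound && (ret = do_insert(v)))
            goto err_unpin;
        if ((ret = do_bind(v)))
            goto err_remove;
        return 0;

    err_remove:
        if (!was_bound)        /* undo the insert only if we did it */
            do_remove(v);
    err_unpin:
        return ret;
    }

    int main(void)
    {
        struct vma v = { false };
        printf("ret=%d inserted=%d\n", do_pin(&v), (int)v.inserted);
        return 0;
    }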

+ 29 - 29
drivers/gpu/drm/i915/intel_display.c

@@ -3669,10 +3669,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
 	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
 	crtc->base.mode = crtc->base.state->mode;
 
-	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
-		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
-		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
-
 	/*
 	 * Update pipe size and adjust fitter if needed: the reason for this is
 	 * that in compute_mode_changes we check the native mode (not the pfit
@@ -4796,23 +4792,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
 	struct intel_crtc_scaler_state *scaler_state =
 		&crtc->config->scaler_state;
 
-	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
-
 	if (crtc->config->pch_pfit.enabled) {
 		int id;
 
-		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
-			DRM_ERROR("Requesting pfit without getting a scaler first\n");
+		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
 			return;
-		}
 
 		id = scaler_state->scaler_id;
 		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
 			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
 		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
 		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
-
-		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
 	}
 }
 
@@ -14379,6 +14369,24 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
 	} while (progress);
 }
 
+static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
+{
+	struct intel_atomic_state *state, *next;
+	struct llist_node *freed;
+
+	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+	llist_for_each_entry_safe(state, next, freed, freed)
+		drm_atomic_state_put(&state->base);
+}
+
+static void intel_atomic_helper_free_state_worker(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+
+	intel_atomic_helper_free_state(dev_priv);
+}
+
 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
@@ -14545,6 +14553,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 	 * can happen also when the device is completely off.
 	 */
 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
+	intel_atomic_helper_free_state(dev_priv);
 }
 
 static void intel_atomic_commit_work(struct work_struct *work)
@@ -14946,17 +14956,19 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
 		to_intel_atomic_state(old_crtc_state->state);
 	bool modeset = needs_modeset(crtc->state);
 
+	if (!modeset &&
+	    (intel_cstate->base.color_mgmt_changed ||
+	     intel_cstate->update_pipe)) {
+		intel_color_set_csc(crtc->state);
+		intel_color_load_luts(crtc->state);
+	}
+
 	/* Perform vblank evasion around commit operation */
 	intel_pipe_update_start(intel_crtc);
 
 	if (modeset)
 		goto out;
 
-	if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
-		intel_color_set_csc(crtc->state);
-		intel_color_load_luts(crtc->state);
-	}
-
 	if (intel_cstate->update_pipe)
 		intel_update_pipe_config(intel_crtc, old_intel_cstate);
 	else if (INTEL_GEN(dev_priv) >= 9)
@@ -16599,18 +16611,6 @@ fail:
 	drm_modeset_acquire_fini(&ctx);
 }
 
-static void intel_atomic_helper_free_state(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
-	struct intel_atomic_state *state, *next;
-	struct llist_node *freed;
-
-	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
-	llist_for_each_entry_safe(state, next, freed, freed)
-		drm_atomic_state_put(&state->base);
-}
-
 int intel_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16631,7 +16631,7 @@ int intel_modeset_init(struct drm_device *dev)
 	dev->mode_config.funcs = &intel_mode_funcs;
 
 	INIT_WORK(&dev_priv->atomic_helper.free_work,
-		  intel_atomic_helper_free_state);
+		  intel_atomic_helper_free_state_worker);
 
 	intel_init_quirks(dev);
 

+ 4 - 6
drivers/gpu/drm/i915/intel_fbdev.c

@@ -357,14 +357,13 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
 				    bool *enabled, int width, int height)
 {
 	struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
-	unsigned long conn_configured, mask;
+	unsigned long conn_configured, conn_seq, mask;
 	unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
 	int i, j;
 	bool *save_enabled;
 	bool fallback = true;
 	int num_connectors_enabled = 0;
 	int num_connectors_detected = 0;
-	int pass = 0;
 
 	save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
 	if (!save_enabled)
@@ -374,6 +373,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
 	mask = BIT(count) - 1;
 	conn_configured = 0;
retry:
+	conn_seq = conn_configured;
 	for (i = 0; i < count; i++) {
 		struct drm_fb_helper_connector *fb_conn;
 		struct drm_connector *connector;
@@ -387,7 +387,7 @@ retry:
 		if (conn_configured & BIT(i))
 			continue;
 
-		if (pass == 0 && !connector->has_tile)
+		if (conn_seq == 0 && !connector->has_tile)
 			continue;
 
 		if (connector->status == connector_status_connected)
@@ -498,10 +498,8 @@ retry:
 		conn_configured |= BIT(i);
 	}
 
-	if ((conn_configured & mask) != mask) {
-		pass++;
+	if ((conn_configured & mask) != mask && conn_configured != conn_seq)
 		goto retry;
-	}
 
 	/*
 	 * If the BIOS didn't enable everything it could, fall back to have the
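
The fbdev hunk replaces an unbounded pass counter with a snapshot of the configured-connector bitmask: retry only while a pass makes progress, which guarantees termination even when some connectors can never be configured. A standalone sketch of the loop shape (the fake "connectors 0 and 2 succeed" logic is invented to drive the demo):

    #include <stdio.h>

    int main(void)
    {
        unsigned long conn_configured = 0, conn_seq;
        const unsigned long mask = 0x7;   /* three connectors */
        int pass = 0;

    retry:
        conn_seq = conn_configured;       /* snapshot before this pass */
        /* pretend only connectors 0 and 2 ever succeed */
        conn_configured |= 0x1;
        if (conn_seq != 0)                /* tiled pass done, take the rest */
            conn_configured |= 0x4;
        pass++;

        /* stop when everything is configured OR the pass made no progress */
        if ((conn_configured & mask) != mask && conn_configured != conn_seq)
            goto retry;

        printf("stopped after %d passes, configured=0x%lx\n",
               pass, conn_configured);
        return 0;
    }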

+ 13 - 5
drivers/gpu/drm/i915/intel_pm.c

@@ -4891,6 +4891,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 		break;
 	}
 
+	/* When byt can survive without system hang with dynamic
+	 * sw freq adjustments, this restriction can be lifted.
+	 */
+	if (IS_VALLEYVIEW(dev_priv))
+		goto skip_hw_write;
+
 	I915_WRITE(GEN6_RP_UP_EI,
 		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
 	I915_WRITE(GEN6_RP_UP_THRESHOLD,
@@ -4911,6 +4917,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 		   GEN6_RP_UP_BUSY_AVG |
 		   GEN6_RP_DOWN_IDLE_AVG);
 
+skip_hw_write:
 	dev_priv->rps.power = new_power;
 	dev_priv->rps.up_threshold = threshold_up;
 	dev_priv->rps.down_threshold = threshold_down;
@@ -7916,10 +7923,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 10 ms with
+ * for @timeout_base_ms and if this times out for another 50 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
@@ -7955,14 +7962,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
 	 * worst case) _and_ PCODE was busy for some reason even after a
 	 * (queued) request and @timeout_base_ms delay. As a workaround retry
 	 * the poll with preemption disabled to maximize the number of
-	 * requests. Increase the timeout from @timeout_base_ms to 10ms to
+	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
 	 * account for interrupts that could reduce the number of these
-	 * requests.
+	 * requests, and for any quirks of the PCODE firmware that delays
+	 * the request completion.
 	 */
 	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
 	WARN_ON_ONCE(timeout_base_ms > 3);
 	preempt_disable();
-	ret = wait_for_atomic(COND, 10);
+	ret = wait_for_atomic(COND, 50);
 	preempt_enable();
 
 out:

+ 0 - 3
drivers/gpu/drm/i915/intel_sprite.c

@@ -254,9 +254,6 @@ skl_update_plane(struct drm_plane *drm_plane,
 		int scaler_id = plane_state->scaler_id;
 		const struct intel_scaler *scaler;
 
-		DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n",
-			      plane_id, PS_PLANE_SEL(plane_id));
-
 		scaler = &crtc_state->scaler_state.scalers[scaler_id];
 
 		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),

+ 6 - 7
drivers/gpu/drm/i915/intel_uncore.c

@@ -119,6 +119,8 @@ fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
 
 	for_each_fw_domain_masked(d, fw_domains, dev_priv)
 		fw_domain_wait_ack(d);
+
+	dev_priv->uncore.fw_domains_active |= fw_domains;
 }
 
 static void
@@ -130,6 +132,8 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
 		fw_domain_put(d);
 		fw_domain_posting_read(d);
 	}
+
+	dev_priv->uncore.fw_domains_active &= ~fw_domains;
 }
 
 static void
@@ -240,10 +244,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
 	if (WARN_ON(domain->wake_count == 0))
 		domain->wake_count++;
 
-	if (--domain->wake_count == 0) {
+	if (--domain->wake_count == 0)
 		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
-		dev_priv->uncore.fw_domains_active &= ~domain->mask;
-	}
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
@@ -454,10 +456,8 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 			fw_domains &= ~domain->mask;
 	}
 
-	if (fw_domains) {
+	if (fw_domains)
 		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
-		dev_priv->uncore.fw_domains_active |= fw_domains;
-	}
 }
 
 /**
@@ -968,7 +968,6 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
 		fw_domain_arm_timer(domain);
 
 	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
-	dev_priv->uncore.fw_domains_active |= fw_domains;
 }
 
 static inline void __force_wake_auto(struct drm_i915_private *dev_priv,

+ 0 - 3
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c

@@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
 	struct drm_gem_object *obj = buffer->priv;
 	int ret = 0;
 
-	if (WARN_ON(!obj->filp))
-		return -EINVAL;
-
 	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
 	if (ret < 0)
 		return ret;

+ 6 - 0
drivers/gpu/drm/radeon/si_dpm.c

@@ -2984,6 +2984,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 		    (rdev->pdev->device == 0x6667)) {
 			max_sclk = 75000;
 		}
+	} else if (rdev->family == CHIP_OLAND) {
+		if ((rdev->pdev->device == 0x6604) &&
+		    (rdev->pdev->subsystem_vendor == 0x1028) &&
+		    (rdev->pdev->subsystem_device == 0x066F)) {
+			max_sclk = 75000;
+		}
 	}
 
 	if (rps->vce_active) {

+ 24 - 13
drivers/gpu/drm/tilcdc/tilcdc_crtc.c

@@ -464,6 +464,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+	unsigned long flags;

 	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 	mutex_lock(&tilcdc_crtc->enable_lock);
@@ -484,7 +485,17 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
 	tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
 			  LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
 			  LCDC_PALETTE_LOAD_MODE_MASK);
+
+	/* There is no real chance for a race here as the time stamp
+	 * is taken before the raster DMA is started. The spin-lock is
+	 * taken to have a memory barrier after taking the time-stamp
+	 * and to avoid a context switch between taking the stamp and
+	 * enabling the raster.
+	 */
+	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+	tilcdc_crtc->last_vblank = ktime_get();
 	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

 	drm_crtc_vblank_on(crtc);

@@ -539,7 +550,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
 	}

 	drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
-	tilcdc_crtc->last_vblank = 0;

 	tilcdc_crtc->enabled = false;
 	mutex_unlock(&tilcdc_crtc->enable_lock);
@@ -602,7 +612,6 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
 {
 	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	unsigned long flags;

 	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

@@ -614,28 +623,30 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
 	drm_framebuffer_reference(fb);

 	crtc->primary->fb = fb;
+	tilcdc_crtc->event = event;

-	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+	mutex_lock(&tilcdc_crtc->enable_lock);

-	if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
+	if (tilcdc_crtc->enabled) {
+		unsigned long flags;
 		ktime_t next_vblank;
 		s64 tdiff;

-		next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
-			1000000 / crtc->hwmode.vrefresh);
+		spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

+		next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
+					   1000000 / crtc->hwmode.vrefresh);
 		tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));

 		if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
 			tilcdc_crtc->next_fb = fb;
-	}
-
-	if (tilcdc_crtc->next_fb != fb)
-		set_scanout(crtc, fb);
+		else
+			set_scanout(crtc, fb);

-	tilcdc_crtc->event = event;
+		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+	}

-	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+	mutex_unlock(&tilcdc_crtc->enable_lock);

 	return 0;
 }
@@ -1036,5 +1047,5 @@ int tilcdc_crtc_create(struct drm_device *dev)

 fail:
 	tilcdc_crtc_destroy(crtc);
-	return -ENOMEM;
+	return ret;
 }
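
The core of this change is ordering: tilcdc_crtc_enable() now takes the vblank time stamp and starts the raster inside one spin_lock_irqsave() region, and tilcdc_crtc_update_fb() reads last_vblank under the same irq_lock, only while the CRTC is known to be enabled (guarded by enable_lock). That is also why the old last_vblank = 0 reset in tilcdc_crtc_off() can go away. A standalone user-space model of the stamp-then-enable ordering, with a pthread mutex and CLOCK_MONOTONIC standing in for the kernel's spinlock and ktime_get(); all names are hypothetical:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static int64_t last_vblank_ns;	/* stand-in for tilcdc_crtc->last_vblank */
static int raster_enabled;

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

static void crtc_enable(void)
{
	/* Stamp first, then enable, all under one lock: no reader
	 * holding the lock can see the raster on with a stale stamp. */
	pthread_mutex_lock(&irq_lock);
	last_vblank_ns = now_ns();
	raster_enabled = 1;
	pthread_mutex_unlock(&irq_lock);
}

int main(void)
{
	crtc_enable();
	printf("enabled=%d stamp=%lld ns\n", raster_enabled,
	       (long long)last_vblank_ns);
	return 0;
}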

+ 3 - 0
drivers/isdn/gigaset/bas-gigaset.c

@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
 		return -ENODEV;
 	}

+	if (hostif->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	dev_info(&udev->dev,
 		 "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
 		 __func__, le16_to_cpu(udev->descriptor.idVendor),
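
The added bNumEndpoints check is standard USB probe hardening: a malformed or malicious device can advertise an interface with fewer endpoints than the driver assumes, so the count must be validated before any endpoint descriptor is dereferenced. A self-contained sketch of the shape of the check; the structs here are simplified stand-ins for the USB core's usb_host_interface types, not the real ones:

#include <errno.h>
#include <stdio.h>

struct if_desc {
	unsigned char bNumEndpoints;
};

struct host_if {
	struct if_desc desc;
};

static int probe_check(const struct host_if *hostif)
{
	/* Bail out before touching any endpoint array, mirroring the
	 * fix above. */
	if (hostif->desc.bNumEndpoints < 1)
		return -ENODEV;
	return 0;
}

int main(void)
{
	struct host_if bogus = { .desc = { .bNumEndpoints = 0 } };

	printf("probe(bogus) = %d\n", probe_check(&bogus));
	return 0;
}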

+ 1 - 0
drivers/macintosh/macio_asic.c

@@ -392,6 +392,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
 	 * To get all the fields, copy all archdata
 	 */
 	dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
+	dev->ofdev.dev.dma_ops = chip->lbus.pdev->dev.dma_ops;
 #endif /* CONFIG_PCI */

 #ifdef DEBUG

+ 16 - 13
drivers/md/dm.c

@@ -989,26 +989,29 @@ static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
 	struct dm_offload *o = container_of(cb, struct dm_offload, cb);
 	struct bio_list list;
 	struct bio *bio;
+	int i;

 	INIT_LIST_HEAD(&o->cb.list);

 	if (unlikely(!current->bio_list))
 		return;

-	list = *current->bio_list;
-	bio_list_init(current->bio_list);
-
-	while ((bio = bio_list_pop(&list))) {
-		struct bio_set *bs = bio->bi_pool;
-		if (unlikely(!bs) || bs == fs_bio_set) {
-			bio_list_add(current->bio_list, bio);
-			continue;
+	for (i = 0; i < 2; i++) {
+		list = current->bio_list[i];
+		bio_list_init(&current->bio_list[i]);
+
+		while ((bio = bio_list_pop(&list))) {
+			struct bio_set *bs = bio->bi_pool;
+			if (unlikely(!bs) || bs == fs_bio_set) {
+				bio_list_add(&current->bio_list[i], bio);
+				continue;
+			}
+
+			spin_lock(&bs->rescue_lock);
+			bio_list_add(&bs->rescue_list, bio);
+			queue_work(bs->rescue_workqueue, &bs->rescue_work);
+			spin_unlock(&bs->rescue_lock);
 		}
-
-		spin_lock(&bs->rescue_lock);
-		bio_list_add(&bs->rescue_list, bio);
-		queue_work(bs->rescue_workqueue, &bs->rescue_work);
-		spin_unlock(&bs->rescue_lock);
 	}
 }
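
The rewrite follows a block-core change in which current->bio_list became a two-element array, so the offload flush has to drain both index 0 and index 1 instead of a single list; missing one half would leave bios stranded. A toy model of just that control-flow change; the list type below is a trivial stand-in for struct bio_list, not the kernel API:

#include <stdio.h>

struct node {
	struct node *next;
};

struct list {
	struct node *head;
};

static struct node *list_pop(struct list *l)
{
	struct node *n = l->head;

	if (n)
		l->head = n->next;
	return n;
}

static void flush_both(struct list bio_list[2])
{
	int i;

	/* The point of the fix: iterate both lists, not one. */
	for (i = 0; i < 2; i++) {
		struct node *n;

		while ((n = list_pop(&bio_list[i])))
			printf("flushed a node from list %d\n", i);
	}
}

int main(void)
{
	struct node a = { 0 }, b = { 0 };
	struct list lists[2] = { { &a }, { &b } };

	flush_both(lists);
	return 0;
}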

+ 1 - 1
drivers/md/md-cluster.c

@@ -777,7 +777,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
 		bm_lockres->flags |= DLM_LKF_NOQUEUE;
 		ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
 		if (ret == -EAGAIN) {
-			memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE);
 			s = read_resync_info(mddev, bm_lockres);
 			if (s) {
 				pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
@@ -974,6 +973,7 @@ static int leave(struct mddev *mddev)
 	lockres_free(cinfo->bitmap_lockres);
 	unlock_all_bitmaps(mddev);
 	dlm_release_lockspace(cinfo->lockspace, 2);
+	kfree(cinfo);
 	return 0;
 }
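
The kfree(cinfo) closes a leak: the per-mddev cluster info allocated when joining the cluster was never freed in leave(), so every join/leave cycle leaked one allocation. A minimal sketch of the pairing, with simplified hypothetical names in place of struct md_cluster_info:

#include <stdlib.h>

struct cluster_info {
	int lockspace;	/* placeholder for the real fields */
};

static struct cluster_info *cluster_join(void)
{
	return calloc(1, sizeof(struct cluster_info));
}

static void cluster_leave(struct cluster_info *cinfo)
{
	/* ... release locks and the lockspace first ... */
	free(cinfo);	/* the step the fix adds */
}

int main(void)
{
	struct cluster_info *c = cluster_join();

	cluster_leave(c);
	return 0;
}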

Some files were not shown because too many files changed in this diff