
Merge commit 'fc69910f329d' into uaccess.mips

backmerge of a build fix from mainline
Al Viro, 8 years ago
parent
commit 054838bc01
100 changed files with 1034 additions and 265 deletions
  1. Documentation/admin-guide/kernel-parameters.txt (+6, -0)
  2. Documentation/trace/kprobetrace.txt (+1, -1)
  3. Documentation/trace/uprobetracer.txt (+1, -1)
  4. arch/arm/xen/mm.c (+2, -0)
  5. arch/avr32/oprofile/backtrace.c (+1, -1)
  6. arch/h8300/kernel/ptrace_h.c (+1, -1)
  7. arch/mips/cavium-octeon/cpu.c (+2, -0)
  8. arch/mips/cavium-octeon/crypto/octeon-crypto.c (+1, -0)
  9. arch/mips/cavium-octeon/smp.c (+1, -0)
  10. arch/mips/include/asm/fpu.h (+1, -0)
  11. arch/mips/kernel/smp-bmips.c (+1, -0)
  12. arch/mips/kernel/smp-mt.c (+1, -0)
  13. arch/mips/loongson64/loongson-3/cop2-ex.c (+1, -0)
  14. arch/mips/netlogic/common/smp.c (+1, -0)
  15. arch/mips/netlogic/xlp/cop2-ex.c (+3, -0)
  16. arch/mips/sgi-ip22/ip28-berr.c (+1, -0)
  17. arch/mips/sgi-ip27/ip27-berr.c (+2, -0)
  18. arch/mips/sgi-ip27/ip27-smp.c (+3, -0)
  19. arch/mips/sgi-ip32/ip32-berr.c (+1, -0)
  20. arch/mips/sgi-ip32/ip32-reset.c (+1, -0)
  21. arch/powerpc/Kconfig (+72, -66)
  22. arch/powerpc/Makefile (+10, -1)
  23. arch/powerpc/include/asm/checksum.h (+1, -1)
  24. arch/powerpc/include/asm/cpuidle.h (+2, -2)
  25. arch/powerpc/include/asm/elf.h (+2, -2)
  26. arch/powerpc/include/asm/nohash/pgtable.h (+1, -1)
  27. arch/powerpc/include/asm/ppc-opcode.h (+7, -0)
  28. arch/powerpc/include/asm/prom.h (+12, -6)
  29. arch/powerpc/kernel/idle_book3s.S (+6, -4)
  30. arch/powerpc/kernel/prom_init.c (+113, -7)
  31. arch/powerpc/kernel/setup_64.c (+4, -1)
  32. arch/powerpc/lib/Makefile (+1, -0)
  33. arch/powerpc/lib/sstep.c (+0, -20)
  34. arch/powerpc/lib/test_emulate_step.c (+434, -0)
  35. arch/powerpc/mm/init_64.c (+30, -6)
  36. arch/powerpc/mm/pgtable-radix.c (+4, -0)
  37. arch/powerpc/platforms/powernv/opal-wrappers.S (+2, -2)
  38. arch/powerpc/sysdev/xics/icp-opal.c (+10, -0)
  39. arch/powerpc/sysdev/xics/xics-common.c (+14, -3)
  40. arch/s390/configs/default_defconfig (+1, -1)
  41. arch/s390/configs/gcov_defconfig (+1, -1)
  42. arch/s390/configs/performance_defconfig (+1, -1)
  43. arch/s390/defconfig (+1, -1)
  44. arch/x86/configs/x86_64_defconfig (+1, -0)
  45. arch/x86/events/amd/core.c (+1, -1)
  46. arch/x86/events/intel/cstate.c (+1, -1)
  47. arch/x86/events/intel/rapl.c (+1, -1)
  48. arch/x86/events/intel/uncore.h (+3, -3)
  49. arch/x86/hyperv/hv_init.c (+1, -1)
  50. arch/x86/include/asm/pkeys.h (+9, -6)
  51. arch/x86/include/uapi/asm/bootparam.h (+1, -1)
  52. arch/x86/kernel/apic/apic.c (+7, -16)
  53. arch/x86/kernel/cpu/amd.c (+0, -4)
  54. arch/x86/kernel/cpu/centaur.c (+0, -2)
  55. arch/x86/kernel/cpu/common.c (+0, -3)
  56. arch/x86/kernel/cpu/cyrix.c (+0, -1)
  57. arch/x86/kernel/cpu/intel.c (+0, -4)
  58. arch/x86/kernel/cpu/intel_rdt_rdtgroup.c (+0, -1)
  59. arch/x86/kernel/cpu/transmeta.c (+0, -2)
  60. arch/x86/kernel/cpu/vmware.c (+0, -1)
  61. arch/x86/kernel/hpet.c (+1, -1)
  62. arch/x86/kernel/kdebugfs.c (+1, -1)
  63. arch/x86/kernel/kprobes/common.h (+1, -1)
  64. arch/x86/kernel/kprobes/core.c (+3, -3)
  65. arch/x86/kernel/kprobes/opt.c (+1, -1)
  66. arch/x86/kernel/reboot.c (+16, -0)
  67. arch/x86/kernel/tsc.c (+23, -12)
  68. arch/x86/pci/common.c (+9, -0)
  69. arch/x86/platform/uv/tlb_uv.c (+0, -1)
  70. arch/x86/purgatory/purgatory.c (+6, -5)
  71. arch/x86/purgatory/purgatory.h (+8, -0)
  72. arch/x86/purgatory/setup-x86_64.S (+1, -0)
  73. drivers/acpi/internal.h (+2, -0)
  74. drivers/acpi/ioapic.c (+16, -6)
  75. drivers/acpi/pci_root.c (+2, -2)
  76. drivers/char/nwbutton.c (+1, -1)
  77. drivers/firmware/efi/arm-runtime.c (+1, -0)
  78. drivers/firmware/efi/libstub/secureboot.c (+2, -2)
  79. drivers/irqchip/irq-crossbar.c (+2, -1)
  80. drivers/xen/swiotlb-xen.c (+47, -0)
  81. fs/timerfd.c (+4, -4)
  82. include/linux/jump_label.h (+9, -2)
  83. include/linux/user_namespace.h (+1, -1)
  84. include/trace/events/syscalls.h (+1, -0)
  85. include/xen/swiotlb-xen.h (+11, -0)
  86. kernel/locking/lockdep.c (+9, -2)
  87. kernel/locking/test-ww_mutex.c (+3, -3)
  88. kernel/sched/core.c (+8, -3)
  89. kernel/sched/fair.c (+1, -1)
  90. kernel/sched/features.h (+5, -0)
  91. kernel/time/jiffies.c (+1, -1)
  92. kernel/trace/Kconfig (+3, -3)
  93. kernel/trace/Makefile (+2, -2)
  94. kernel/trace/ftrace.c (+18, -5)
  95. kernel/trace/trace.c (+5, -5)
  96. kernel/trace/trace_probe.h (+2, -2)
  97. kernel/ucount.c (+11, -7)
  98. lib/radix-tree.c (+2, -2)
  99. lib/refcount.c (+7, -7)
  100. scripts/module-common.lds (+2, -0)

+ 6 - 0
Documentation/admin-guide/kernel-parameters.txt

@@ -1183,6 +1183,12 @@
 			functions that can be changed at run time by the
 			set_graph_notrace file in the debugfs tracing directory.
 
+	ftrace_graph_max_depth=<uint>
+			[FTRACE] Used with the function graph tracer. This is
+			the max depth it will trace into a function. This value
+			can be changed at run time by the max_graph_depth file
+			in the tracefs tracing directory. default: 0 (no limit)
+
 	gamecon.map[2|3]=
 			[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
 			support via parallel port (up to 5 devices per port)

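The new boot parameter pairs with the existing max_graph_depth tracefs file, so the same limit can also be set after boot. A minimal user-space sketch, assuming tracefs is reachable at /sys/kernel/debug/tracing (the mount point varies by system):

	#include <stdio.h>

	int main(void)
	{
		/* Path is an assumption; tracefs may also live at
		 * /sys/kernel/tracing on newer systems. */
		const char *path = "/sys/kernel/debug/tracing/max_graph_depth";
		FILE *f = fopen(path, "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fprintf(f, "3\n");	/* trace 3 levels deep; 0 means no limit */
		return fclose(f) ? 1 : 0;
	}
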
+ 1 - 1
Documentation/trace/kprobetrace.txt

@@ -12,7 +12,7 @@ kprobes can probe (this means, all functions body except for __kprobes
 functions). Unlike the Tracepoint based event, this can be added and removed
 dynamically, on the fly.
 
-To enable this feature, build your kernel with CONFIG_KPROBE_EVENT=y.
+To enable this feature, build your kernel with CONFIG_KPROBE_EVENTS=y.
 
 Similar to the events tracer, this doesn't need to be activated via
 current_tracer. Instead of that, add probe points via

+ 1 - 1
Documentation/trace/uprobetracer.txt

@@ -7,7 +7,7 @@
 Overview
 --------
 Uprobe based trace events are similar to kprobe based trace events.
-To enable this feature, build your kernel with CONFIG_UPROBE_EVENT=y.
+To enable this feature, build your kernel with CONFIG_UPROBE_EVENTS=y.
 
 Similar to the kprobe-event tracer, this doesn't need to be activated via
 current_tracer. Instead of that, add probe points via

+ 2 - 0
arch/arm/xen/mm.c

@@ -198,6 +198,8 @@ static const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.unmap_page = xen_swiotlb_unmap_page,
 	.dma_supported = xen_swiotlb_dma_supported,
 	.set_dma_mask = xen_swiotlb_set_dma_mask,
+	.mmap = xen_swiotlb_dma_mmap,
+	.get_sgtable = xen_swiotlb_get_sgtable,
 };
 
 int __init xen_mm_init(void)

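These two callbacks back the generic dma_mmap_*() and dma_get_sgtable_*() helpers, which previously had no Xen-aware implementation on ARM. A hedged sketch of the driver-side path this enables; every my_-prefixed identifier is illustrative:

	#include <linux/dma-mapping.h>

	struct my_ctx {
		struct device *dev;
		void *cpu_addr;		/* from dma_alloc_coherent() */
		dma_addr_t dma_handle;
		size_t size;
	};

	/* dma_mmap_coherent() dispatches to the dma_map_ops ->mmap hook,
	 * i.e. xen_swiotlb_dma_mmap once these ops are installed. */
	static int my_mmap_buffer(struct my_ctx *ctx, struct vm_area_struct *vma)
	{
		return dma_mmap_coherent(ctx->dev, vma, ctx->cpu_addr,
					 ctx->dma_handle, ctx->size);
	}
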
+ 1 - 1
arch/avr32/oprofile/backtrace.c

@@ -14,7 +14,7 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/sched.h>
+#include <linux/ptrace.h>
 #include <linux/uaccess.h>
 
 /* The first two words of each frame on the stack look like this if we have

+ 1 - 1
arch/h8300/kernel/ptrace_h.c

@@ -9,7 +9,7 @@
  */
 
 #include <linux/linkage.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
 #include <asm/ptrace.h>
 
 #define BREAKINST 0x5730 /* trapa #3 */

+ 2 - 0
arch/mips/cavium-octeon/cpu.c

@@ -10,7 +10,9 @@
 #include <linux/irqflags.h>
 #include <linux/notifier.h>
 #include <linux/prefetch.h>
+#include <linux/ptrace.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 
 #include <asm/cop2.h>
 #include <asm/current.h>

+ 1 - 0
arch/mips/cavium-octeon/crypto/octeon-crypto.c

@@ -9,6 +9,7 @@
 #include <asm/cop2.h>
 #include <linux/export.h>
 #include <linux/interrupt.h>
+#include <linux/sched/task_stack.h>
 
 #include "octeon-crypto.h"
 

+ 1 - 0
arch/mips/cavium-octeon/smp.c

@@ -12,6 +12,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/sched.h>
 #include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
 #include <linux/init.h>
 #include <linux/export.h>
 

+ 1 - 0
arch/mips/include/asm/fpu.h

@@ -12,6 +12,7 @@
 
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
+#include <linux/ptrace.h>
 #include <linux/thread_info.h>
 #include <linux/bitops.h>
 

+ 1 - 0
arch/mips/kernel/smp-bmips.c

@@ -11,6 +11,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include <linux/smp.h>

+ 1 - 0
arch/mips/kernel/smp-mt.c

@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/irqchip/mips-gic.h>
 #include <linux/compiler.h>
+#include <linux/sched/task_stack.h>
 #include <linux/smp.h>
 
 #include <linux/atomic.h>

+ 1 - 0
arch/mips/loongson64/loongson-3/cop2-ex.c

@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/notifier.h>
+#include <linux/ptrace.h>
 
 #include <asm/fpu.h>
 #include <asm/cop2.h>

+ 1 - 0
arch/mips/netlogic/common/smp.c

@@ -35,6 +35,7 @@
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/sched/task_stack.h>
 #include <linux/smp.h>
 #include <linux/irq.h>
 

+ 3 - 0
arch/mips/netlogic/xlp/cop2-ex.c

@@ -9,11 +9,14 @@
  * Copyright (C) 2009 Wind River Systems,
  *   written by Ralf Baechle <ralf@linux-mips.org>
  */
+#include <linux/capability.h>
 #include <linux/init.h>
 #include <linux/irqflags.h>
 #include <linux/notifier.h>
 #include <linux/prefetch.h>
+#include <linux/ptrace.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 
 #include <asm/cop2.h>
 #include <asm/current.h>

+ 1 - 0
arch/mips/sgi-ip22/ip28-berr.c

@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
 #include <linux/seq_file.h>
 
 #include <asm/addrspace.h>

+ 2 - 0
arch/mips/sgi-ip27/ip27-berr.c

@@ -12,7 +12,9 @@
 #include <linux/signal.h>	/* for SIGBUS */
 #include <linux/sched.h>	/* schow_regs(), force_sig() */
 #include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
 
+#include <asm/ptrace.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/sn0/hub.h>

+ 3 - 0
arch/mips/sgi-ip27/ip27-smp.c

@@ -8,10 +8,13 @@
  */
 #include <linux/init.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 #include <linux/topology.h>
 #include <linux/nodemask.h>
+
 #include <asm/page.h>
 #include <asm/processor.h>
+#include <asm/ptrace.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/gda.h>
 #include <asm/sn/intr.h>

+ 1 - 0
arch/mips/sgi-ip32/ip32-berr.c

@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
 #include <asm/traps.h>
 #include <linux/uaccess.h>
 #include <asm/addrspace.h>

+ 1 - 0
arch/mips/sgi-ip32/ip32-reset.c

@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/sched/signal.h>
 #include <linux/notifier.h>
 #include <linux/delay.h>
 #include <linux/rtc/ds1685.h>

+ 72 - 66
arch/powerpc/Kconfig

@@ -80,93 +80,99 @@ config ARCH_HAS_DMA_SET_COHERENT_MASK
 config PPC
 	bool
 	default y
-	select BUILDTIME_EXTABLE_SORT
+	#
+	# Please keep this list sorted alphabetically.
+	#
+	select ARCH_HAS_DEVMEM_IS_ALLOWED
+	select ARCH_HAS_DMA_SET_COHERENT_MASK
+	select ARCH_HAS_ELF_RANDOMIZE
+	select ARCH_HAS_GCOV_PROFILE_ALL
+	select ARCH_HAS_SCALED_CPUTIME		if VIRT_CPU_ACCOUNTING_NATIVE
+	select ARCH_HAS_SG_CHAIN
+	select ARCH_HAS_TICK_BROADCAST		if GENERIC_CLOCKEVENTS_BROADCAST
+	select ARCH_HAS_UBSAN_SANITIZE_ALL
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
+	select ARCH_SUPPORTS_ATOMIC_RMW
+	select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
+	select ARCH_USE_BUILTIN_BSWAP
+	select ARCH_USE_CMPXCHG_LOCKREF		if PPC64
+	select ARCH_WANT_IPC_PARSE_VERSION
 	select BINFMT_ELF
-	select ARCH_HAS_ELF_RANDOMIZE
-	select OF
-	select OF_EARLY_FLATTREE
-	select OF_RESERVED_MEM
-	select HAVE_FTRACE_MCOUNT_RECORD
+	select BUILDTIME_EXTABLE_SORT
+	select CLONE_BACKWARDS
+	select DCACHE_WORD_ACCESS		if PPC64 && CPU_LITTLE_ENDIAN
+	select EDAC_ATOMIC_SCRUB
+	select EDAC_SUPPORT
+	select GENERIC_ATOMIC64			if PPC32
+	select GENERIC_CLOCKEVENTS
+	select GENERIC_CLOCKEVENTS_BROADCAST	if SMP
+	select GENERIC_CMOS_UPDATE
+	select GENERIC_CPU_AUTOPROBE
+	select GENERIC_IRQ_SHOW
+	select GENERIC_IRQ_SHOW_LEVEL
+	select GENERIC_SMP_IDLE_THREAD
+	select GENERIC_STRNCPY_FROM_USER
+	select GENERIC_STRNLEN_USER
+	select GENERIC_TIME_VSYSCALL_OLD
+	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_HARDENED_USERCOPY
+	select HAVE_ARCH_JUMP_LABEL
+	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_SECCOMP_FILTER
+	select HAVE_ARCH_TRACEHOOK
+	select HAVE_CBPF_JIT			if !PPC64
+	select HAVE_CONTEXT_TRACKING		if PPC64
+	select HAVE_DEBUG_KMEMLEAK
+	select HAVE_DEBUG_STACKOVERFLOW
+	select HAVE_DMA_API_DEBUG
 	select HAVE_DYNAMIC_FTRACE
-	select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
-	select HAVE_FUNCTION_TRACER
+	select HAVE_DYNAMIC_FTRACE_WITH_REGS	if MPROFILE_KERNEL
+	select HAVE_EBPF_JIT			if PPC64
+	select HAVE_EFFICIENT_UNALIGNED_ACCESS	if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
+	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_FUNCTION_TRACER
 	select HAVE_GCC_PLUGINS
-	select SYSCTL_EXCEPTION_TRACE
-	select VIRT_TO_BUS if !PPC64
+	select HAVE_GENERIC_RCU_GUP
+	select HAVE_HW_BREAKPOINT		if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
 	select HAVE_IDE
 	select HAVE_IOREMAP_PROT
-	select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
+	select HAVE_IRQ_EXIT_ON_IRQ_STACK
+	select HAVE_KERNEL_GZIP
 	select HAVE_KPROBES
-	select HAVE_OPTPROBES if PPC64
-	select HAVE_ARCH_KGDB
 	select HAVE_KRETPROBES
-	select HAVE_ARCH_TRACEHOOK
+	select HAVE_LIVEPATCH			if HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
-	select HAVE_DMA_API_DEBUG
+	select HAVE_MOD_ARCH_SPECIFIC
+	select HAVE_NMI				if PERF_EVENTS
 	select HAVE_OPROFILE
-	select HAVE_DEBUG_KMEMLEAK
-	select ARCH_HAS_SG_CHAIN
-	select GENERIC_ATOMIC64 if PPC32
+	select HAVE_OPTPROBES			if PPC64
 	select HAVE_PERF_EVENTS
+	select HAVE_PERF_EVENTS_NMI		if PPC64
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_RCU_TABLE_FREE		if SMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
-	select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
-	select ARCH_WANT_IPC_PARSE_VERSION
-	select SPARSE_IRQ
+	select HAVE_SYSCALL_TRACEPOINTS
+	select HAVE_VIRT_CPU_ACCOUNTING
 	select IRQ_DOMAIN
-	select GENERIC_IRQ_SHOW
-	select GENERIC_IRQ_SHOW_LEVEL
 	select IRQ_FORCED_THREADING
-	select HAVE_RCU_TABLE_FREE if SMP
-	select HAVE_SYSCALL_TRACEPOINTS
-	select HAVE_CBPF_JIT if !PPC64
-	select HAVE_EBPF_JIT if PPC64
-	select HAVE_ARCH_JUMP_LABEL
-	select ARCH_HAVE_NMI_SAFE_CMPXCHG
-	select ARCH_HAS_GCOV_PROFILE_ALL
-	select GENERIC_SMP_IDLE_THREAD
-	select GENERIC_CMOS_UPDATE
-	select GENERIC_TIME_VSYSCALL_OLD
-	select GENERIC_CLOCKEVENTS
-	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
-	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
-	select GENERIC_STRNCPY_FROM_USER
-	select GENERIC_STRNLEN_USER
-	select HAVE_MOD_ARCH_SPECIFIC
 	select MODULES_USE_ELF_RELA
-	select CLONE_BACKWARDS
-	select ARCH_USE_BUILTIN_BSWAP
-	select OLD_SIGSUSPEND
-	select OLD_SIGACTION if PPC32
-	select HAVE_DEBUG_STACKOVERFLOW
-	select HAVE_IRQ_EXIT_ON_IRQ_STACK
-	select ARCH_USE_CMPXCHG_LOCKREF if PPC64
-	select HAVE_ARCH_AUDITSYSCALL
-	select ARCH_SUPPORTS_ATOMIC_RMW
-	select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
 	select NO_BOOTMEM
-	select HAVE_GENERIC_RCU_GUP
-	select HAVE_PERF_EVENTS_NMI if PPC64
-	select HAVE_NMI if PERF_EVENTS
-	select EDAC_SUPPORT
-	select EDAC_ATOMIC_SCRUB
-	select ARCH_HAS_DMA_SET_COHERENT_MASK
-	select ARCH_HAS_DEVMEM_IS_ALLOWED
-	select HAVE_ARCH_SECCOMP_FILTER
-	select ARCH_HAS_UBSAN_SANITIZE_ALL
-	select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
-	select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
-	select GENERIC_CPU_AUTOPROBE
-	select HAVE_VIRT_CPU_ACCOUNTING
-	select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
-	select HAVE_ARCH_HARDENED_USERCOPY
-	select HAVE_KERNEL_GZIP
-	select HAVE_CONTEXT_TRACKING if PPC64
+	select OF
+	select OF_EARLY_FLATTREE
+	select OF_RESERVED_MEM
+	select OLD_SIGACTION			if PPC32
+	select OLD_SIGSUSPEND
+	select SPARSE_IRQ
+	select SYSCTL_EXCEPTION_TRACE
+	select VIRT_TO_BUS			if !PPC64
+	#
+	# Please keep this list sorted alphabetically.
+	#
 
 config GENERIC_CSUM
 	def_bool n

+ 10 - 1
arch/powerpc/Makefile

@@ -72,8 +72,15 @@ GNUTARGET	:= powerpc
 MULTIPLEWORD	:= -mmultiple
 endif
 
-cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(call cc-option,-mbig-endian)
+ifdef CONFIG_PPC64
+cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(call cc-option,-mabi=elfv1)
+cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(call cc-option,-mcall-aixdesc)
+aflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(call cc-option,-mabi=elfv1)
+aflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -mabi=elfv2
+endif
+
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -mlittle-endian
+cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(call cc-option,-mbig-endian)
 ifneq ($(cc-name),clang)
   cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -mno-strict-align
 endif
@@ -113,7 +120,9 @@ ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
 CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
 AFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv2)
 else
+CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv1)
 CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mcall-aixdesc)
+AFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv1)
 endif
 CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
 CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mno-pointers-to-nested-functions)

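For big-endian PPC64 the kernel now pins the ELFv1 ABI explicitly instead of relying on the compiler default (little-endian was already forced to ELFv2). One way to check which ABI a given toolchain targets is the _CALL_ELF predefine; a small sketch, assuming a GCC-compatible powerpc64 compiler:

	#include <stdio.h>

	int main(void)
	{
	#if defined(_CALL_ELF) && _CALL_ELF == 2
		puts("compiler targets the ELFv2 ABI");
	#elif defined(_CALL_ELF)
		puts("compiler targets the ELFv1 ABI");
	#else
		puts("_CALL_ELF not defined (not a PPC64 ELF target)");
	#endif
		return 0;
	}
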
+ 1 - 1
arch/powerpc/include/asm/checksum.h

@@ -112,7 +112,7 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
 
 #ifdef __powerpc64__
 	res += (__force u64)addend;
-	return (__force __wsum)((u32)res + (res >> 32));
+	return (__force __wsum) from64to32(res);
 #else
 	asm("addc %0,%0,%1;"
 	    "addze %0,%0;"

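The point of routing through from64to32() is that the open-coded (u32)res + (res >> 32) can itself carry out of 32 bits, and that final carry was dropped. A stand-alone sketch of a correct two-round fold (fold64to32 is our illustrative name, not necessarily the kernel's exact implementation):

	#include <stdint.h>

	/* Fold a 64-bit ones'-complement sum down to 32 bits.
	 * The first add can produce a carry, so fold twice. */
	static inline uint32_t fold64to32(uint64_t x)
	{
		x = (x & 0xffffffffULL) + (x >> 32);
		x = (x & 0xffffffffULL) + (x >> 32);
		return (uint32_t)x;
	}
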
+ 2 - 2
arch/powerpc/include/asm/cpuidle.h

@@ -70,8 +70,8 @@ static inline void report_invalid_psscr_val(u64 psscr_val, int err)
 	std	r0,0(r1);					\
 	ptesync;						\
 	ld	r0,0(r1);					\
-1:	cmpd	cr0,r0,r0;					\
-	bne	1b;						\
+236:	cmpd	cr0,r0,r0;					\
+	bne	236b;						\
 	IDLE_INST;						\
 
 #define	IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST)			\

+ 2 - 2
arch/powerpc/include/asm/elf.h

@@ -144,8 +144,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 #define ARCH_DLINFO_CACHE_GEOMETRY					\
 	NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size);		\
 	NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i));	\
-	NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1i.size);		\
-	NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1i));	\
+	NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1d.size);		\
+	NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1d));	\
 	NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size);		\
 	NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2));	\
 	NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size);		\

+ 1 - 1
arch/powerpc/include/asm/nohash/pgtable.h

@@ -230,7 +230,7 @@ static inline int hugepd_ok(hugepd_t hpd)
 	return ((hpd_val(hpd) & 0x4) != 0);
 #else
 	/* We clear the top bit to indicate hugepd */
-	return ((hpd_val(hpd) & PD_HUGE) ==  0);
+	return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
 #endif
 }
 

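The old predicate only checked that PD_HUGE was clear, so a completely empty (all-zero) entry also counted as a hugepd. The added hpd_val(hpd) && term rules that out. A toy stand-alone illustration, with MY_PD_HUGE standing in for the real bit:

	#include <stdbool.h>
	#include <stdint.h>

	#define MY_PD_HUGE (1ULL << 63)	/* stand-in for PD_HUGE */

	/* Mirrors the fixed test: the entry must be non-zero AND have
	 * the top bit clear to be a hugepd. */
	static bool my_hugepd_ok(uint64_t hpd)
	{
		return hpd && (hpd & MY_PD_HUGE) == 0;
	}
	/* my_hugepd_ok(0)    -> false (the old code wrongly said true)
	 * my_hugepd_ok(0x42) -> true                                  */
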
+ 7 - 0
arch/powerpc/include/asm/ppc-opcode.h

@@ -284,6 +284,13 @@
 #define PPC_INST_BRANCH_COND		0x40800000
 #define PPC_INST_LBZCIX			0x7c0006aa
 #define PPC_INST_STBCIX			0x7c0007aa
+#define PPC_INST_LWZX			0x7c00002e
+#define PPC_INST_LFSX			0x7c00042e
+#define PPC_INST_STFSX			0x7c00052e
+#define PPC_INST_LFDX			0x7c0004ae
+#define PPC_INST_STFDX			0x7c0005ae
+#define PPC_INST_LVX			0x7c0000ce
+#define PPC_INST_STVX			0x7c0001ce
 
 /* macros to insert fields into opcodes */
 #define ___PPC_RA(a)	(((a) & 0x1f) << 16)

+ 12 - 6
arch/powerpc/include/asm/prom.h

@@ -160,12 +160,18 @@ struct of_drconf_cell {
 #define OV5_PFO_HW_ENCR		0x1120	/* PFO Encryption Accelerator */
 #define OV5_SUB_PROCESSORS	0x1501	/* 1,2,or 4 Sub-Processors supported */
 #define OV5_XIVE_EXPLOIT	0x1701	/* XIVE exploitation supported */
-#define OV5_MMU_RADIX_300	0x1880	/* ISA v3.00 radix MMU supported */
-#define OV5_MMU_HASH_300	0x1840	/* ISA v3.00 hash MMU supported */
-#define OV5_MMU_SEGM_RADIX	0x1820	/* radix mode (no segmentation) */
-#define OV5_MMU_PROC_TBL	0x1810	/* hcall selects SLB or proc table */
-#define OV5_MMU_SLB		0x1800	/* always use SLB */
-#define OV5_MMU_GTSE		0x1808	/* Guest translation shootdown */
+/* MMU Base Architecture */
+#define OV5_MMU_SUPPORT		0x18C0	/* MMU Mode Support Mask */
+#define OV5_MMU_HASH		0x1800	/* Hash MMU Only */
+#define OV5_MMU_RADIX		0x1840	/* Radix MMU Only */
+#define OV5_MMU_EITHER		0x1880	/* Hash or Radix Supported */
+#define OV5_MMU_DYNAMIC		0x18C0	/* Hash or Radix Can Switch Later */
+#define OV5_NMMU		0x1820	/* Nest MMU Available */
+/* Hash Table Extensions */
+#define OV5_HASH_SEG_TBL	0x1980	/* In Memory Segment Tables Available */
+#define OV5_HASH_GTSE		0x1940	/* Guest Translation Shoot Down Avail */
+/* Radix Table Extensions */
+#define OV5_RADIX_GTSE		0x1A40	/* Guest Translation Shoot Down Avail */
 
 /* Option Vector 6: IBM PAPR hints */
 #define OV6_LINUX		0x02	/* Linux is our OS */

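Each OV5_* constant packs the option-vector byte index into its high byte and a bit mask into its low byte; the header splits them with OV5_INDX()/OV5_FEAT() style shifts. A sketch of the decomposition, using the values added above (MY_-prefixed macros are illustrative):

	/* High byte = index into option vector 5, low byte = bit mask. */
	#define MY_OV5_INDX(x)	((x) >> 8)
	#define MY_OV5_FEAT(x)	((x) & 0xff)

	/* MY_OV5_INDX(0x18C0) == 0x18 and MY_OV5_FEAT(0x18C0) == 0xC0:
	 * the 0xC0 mask in byte 0x18 is a two-bit field whose values
	 * 0x00/0x40/0x80/0xC0 correspond to OV5_MMU_HASH, OV5_MMU_RADIX,
	 * OV5_MMU_EITHER and OV5_MMU_DYNAMIC respectively. */
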
+ 6 - 4
arch/powerpc/kernel/idle_book3s.S

@@ -276,19 +276,21 @@ power_enter_stop:
  */
 	andis.   r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
 	clrldi   r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
-	bne	 1f
+	bne	 .Lhandle_esl_ec_set
 	IDLE_STATE_ENTER_SEQ(PPC_STOP)
 	li	r3,0  /* Since we didn't lose state, return 0 */
 	b 	pnv_wakeup_noloss
+
+.Lhandle_esl_ec_set:
 /*
  * Check if the requested state is a deep idle state.
  */
-1:	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
+	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
 	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
 	cmpd	r3,r4
-	bge	2f
+	bge	.Lhandle_deep_stop
 	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
-2:
+.Lhandle_deep_stop:
 /*
  * Entering deep idle state.
  * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to

+ 113 - 7
arch/powerpc/kernel/prom_init.c

@@ -168,6 +168,14 @@ static unsigned long __initdata prom_tce_alloc_start;
 static unsigned long __initdata prom_tce_alloc_end;
 #endif
 
+static bool __initdata prom_radix_disable;
+
+struct platform_support {
+	bool hash_mmu;
+	bool radix_mmu;
+	bool radix_gtse;
+};
+
 /* Platforms codes are now obsolete in the kernel. Now only used within this
  * file and ultimately gone too. Feel free to change them if you need, they
  * are not shared with anything outside of this file anymore
@@ -626,6 +634,12 @@ static void __init early_cmdline_parse(void)
 		prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
 #endif
 	}
+
+	opt = strstr(prom_cmd_line, "disable_radix");
+	if (opt) {
+		prom_debug("Radix disabled from cmdline\n");
+		prom_radix_disable = true;
+	}
 }
 
 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
@@ -695,6 +709,8 @@ struct option_vector5 {
 	u8 byte22;
 	u8 intarch;
 	u8 mmu;
+	u8 hash_ext;
+	u8 radix_ext;
 } __packed;
 
 struct option_vector6 {
@@ -850,8 +866,9 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
 		.reserved3 = 0,
 		.subprocessors = 1,
 		.intarch = 0,
-		.mmu = OV5_FEAT(OV5_MMU_RADIX_300) | OV5_FEAT(OV5_MMU_HASH_300) |
-			OV5_FEAT(OV5_MMU_PROC_TBL) | OV5_FEAT(OV5_MMU_GTSE),
+		.mmu = 0,
+		.hash_ext = 0,
+		.radix_ext = 0,
 	},
 
 	/* option vector 6: IBM PAPR hints */
@@ -990,6 +1007,92 @@ static int __init prom_count_smt_threads(void)
 
 }
 
+static void __init prom_parse_mmu_model(u8 val,
+					struct platform_support *support)
+{
+	switch (val) {
+	case OV5_FEAT(OV5_MMU_DYNAMIC):
+	case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
+		prom_debug("MMU - either supported\n");
+		support->radix_mmu = !prom_radix_disable;
+		support->hash_mmu = true;
+		break;
+	case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
+		prom_debug("MMU - radix only\n");
+		if (prom_radix_disable) {
+			/*
+			 * If we __have__ to do radix, we're better off ignoring
+			 * the command line rather than not booting.
+			 */
+			prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
+		}
+		support->radix_mmu = true;
+		break;
+	case OV5_FEAT(OV5_MMU_HASH):
+		prom_debug("MMU - hash only\n");
+		support->hash_mmu = true;
+		break;
+	default:
+		prom_debug("Unknown mmu support option: 0x%x\n", val);
+		break;
+	}
+}
+
+static void __init prom_parse_platform_support(u8 index, u8 val,
+					       struct platform_support *support)
+{
+	switch (index) {
+	case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
+		prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
+		break;
+	case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
+		if (val & OV5_FEAT(OV5_RADIX_GTSE)) {
+			prom_debug("Radix - GTSE supported\n");
+			support->radix_gtse = true;
+		}
+		break;
+	}
+}
+
+static void __init prom_check_platform_support(void)
+{
+	struct platform_support supported = {
+		.hash_mmu = false,
+		.radix_mmu = false,
+		.radix_gtse = false
+	};
+	int prop_len = prom_getproplen(prom.chosen,
+				       "ibm,arch-vec-5-platform-support");
+	if (prop_len > 1) {
+		int i;
+		u8 vec[prop_len];
+		prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
+			   prop_len);
+		prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
+			     &vec, sizeof(vec));
+		for (i = 0; i < prop_len; i += 2) {
+			prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
+								  , vec[i]
+								  , vec[i + 1]);
+			prom_parse_platform_support(vec[i], vec[i + 1],
+						    &supported);
+		}
+	}
+
+	if (supported.radix_mmu && supported.radix_gtse) {
+		/* Radix preferred - but we require GTSE for now */
+		prom_debug("Asking for radix with GTSE\n");
+		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
+		ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE);
+	} else if (supported.hash_mmu) {
+		/* Default to hash mmu (if we can) */
+		prom_debug("Asking for hash\n");
+		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
+	} else {
+		/* We're probably on a legacy hypervisor */
+		prom_debug("Assuming legacy hash support\n");
+	}
+}
 
 static void __init prom_send_capabilities(void)
 {
@@ -997,6 +1100,9 @@ static void __init prom_send_capabilities(void)
 	prom_arg_t ret;
 	u32 cores;
 
+	/* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
+	prom_check_platform_support();
+
 	root = call_prom("open", 1, 1, ADDR("/"));
 	if (root != 0) {
 		/* We need to tell the FW about the number of cores we support.
@@ -2993,6 +3099,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
 	 */
 	prom_check_initrd(r3, r4);
 
+	/*
+	 * Do early parsing of command line
+	 */
+	early_cmdline_parse();
+
 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
 	/*
 	 * On pSeries, inform the firmware about our capabilities
@@ -3008,11 +3119,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
 	if (of_platform != PLATFORM_POWERMAC)
 		copy_and_flush(0, kbase, 0x100, 0);
 
-	/*
-	 * Do early parsing of command line
-	 */
-	early_cmdline_parse();
-
 	/*
 	 * Initialize memory management within prom_init
 	 */

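prom_check_platform_support() treats ibm,arch-vec-5-platform-support as a flat array of (index, supported-mask) byte pairs, which is why its loop steps by two. A toy decode with invented sample bytes, assuming the index values defined in prom.h above:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Invented sample: byte 0x18 (MMU support) advertising
		 * "either" (0x80), byte 0x1A offering radix GTSE (0x40). */
		const uint8_t vec[] = { 0x18, 0x80, 0x1A, 0x40 };

		for (size_t i = 0; i + 1 < sizeof(vec); i += 2)
			printf("index 0x%02x: supported mask 0x%02x\n",
			       vec[i], vec[i + 1]);
		return 0;
	}
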
+ 4 - 1
arch/powerpc/kernel/setup_64.c

@@ -408,7 +408,10 @@ static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
 	info->line_size = lsize;
 	info->block_size = bsize;
 	info->log_block_size = __ilog2(bsize);
-	info->blocks_per_page = PAGE_SIZE / bsize;
+	if (bsize)
+		info->blocks_per_page = PAGE_SIZE / bsize;
+	else
+		info->blocks_per_page = 0;
 
 	if (sets == 0)
 		info->assoc = 0xffff;

+ 1 - 0
arch/powerpc/lib/Makefile

@@ -20,6 +20,7 @@ obj64-y	+= copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \
 
 obj64-$(CONFIG_SMP)	+= locks.o
 obj64-$(CONFIG_ALTIVEC)	+= vmx-helper.o
+obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o
 
 obj-y			+= checksum_$(BITS).o checksum_wrappers.o
 

+ 0 - 20
arch/powerpc/lib/sstep.c

@@ -1799,8 +1799,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		goto instr_done;
 
 	case LARX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if (op.ea & (size - 1))
 			break;		/* can't handle misaligned */
 		if (!address_ok(regs, op.ea, size))
@@ -1823,8 +1821,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		goto ldst_done;
 
 	case STCX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if (op.ea & (size - 1))
 			break;		/* can't handle misaligned */
 		if (!address_ok(regs, op.ea, size))
@@ -1849,8 +1845,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		goto ldst_done;
 
 	case LOAD:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
 		if (!err) {
 			if (op.type & SIGNEXT)
@@ -1862,8 +1856,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 
 #ifdef CONFIG_PPC_FPU
 	case LOAD_FP:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if (size == 4)
 			err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
 		else
@@ -1872,15 +1864,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 #endif
 #ifdef CONFIG_ALTIVEC
 	case LOAD_VMX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
 		goto ldst_done;
 #endif
 #ifdef CONFIG_VSX
 	case LOAD_VSX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
 		goto ldst_done;
 #endif
@@ -1903,8 +1891,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		goto instr_done;
 
 	case STORE:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if ((op.type & UPDATE) && size == sizeof(long) &&
 		    op.reg == 1 && op.update_reg == 1 &&
 		    !(regs->msr & MSR_PR) &&
@@ -1917,8 +1903,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 
 #ifdef CONFIG_PPC_FPU
 	case STORE_FP:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if (size == 4)
 			err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
 		else
@@ -1927,15 +1911,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 #endif
 #ifdef CONFIG_ALTIVEC
 	case STORE_VMX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
 		goto ldst_done;
 #endif
 #ifdef CONFIG_VSX
 	case STORE_VSX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
 		goto ldst_done;
 #endif

+ 434 - 0
arch/powerpc/lib/test_emulate_step.c

@@ -0,0 +1,434 @@
+/*
+ * Simple sanity test for emulate_step load/store instructions.
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * This program is free software;  you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "emulate_step_test: " fmt
+
+#include <linux/ptrace.h>
+#include <asm/sstep.h>
+#include <asm/ppc-opcode.h>
+
+#define IMM_L(i)		((uintptr_t)(i) & 0xffff)
+
+/*
+ * Defined with TEST_ prefix so it does not conflict with other
+ * definitions.
+ */
+#define TEST_LD(r, base, i)	(PPC_INST_LD | ___PPC_RT(r) |		\
+					___PPC_RA(base) | IMM_L(i))
+#define TEST_LWZ(r, base, i)	(PPC_INST_LWZ | ___PPC_RT(r) |		\
+					___PPC_RA(base) | IMM_L(i))
+#define TEST_LWZX(t, a, b)	(PPC_INST_LWZX | ___PPC_RT(t) |		\
+					___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STD(r, base, i)	(PPC_INST_STD | ___PPC_RS(r) |		\
+					___PPC_RA(base) | ((i) & 0xfffc))
+#define TEST_LDARX(t, a, b, eh)	(PPC_INST_LDARX | ___PPC_RT(t) |	\
+					___PPC_RA(a) | ___PPC_RB(b) |	\
+					__PPC_EH(eh))
+#define TEST_STDCX(s, a, b)	(PPC_INST_STDCX | ___PPC_RS(s) |	\
+					___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LFSX(t, a, b)	(PPC_INST_LFSX | ___PPC_RT(t) |		\
+					___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STFSX(s, a, b)	(PPC_INST_STFSX | ___PPC_RS(s) |	\
+					___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LFDX(t, a, b)	(PPC_INST_LFDX | ___PPC_RT(t) |		\
+					___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STFDX(s, a, b)	(PPC_INST_STFDX | ___PPC_RS(s) |	\
+					___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LVX(t, a, b)	(PPC_INST_LVX | ___PPC_RT(t) |		\
+					___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STVX(s, a, b)	(PPC_INST_STVX | ___PPC_RS(s) |		\
+					___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LXVD2X(s, a, b)	(PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b))
+#define TEST_STXVD2X(s, a, b)	(PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b))
+
+
+static void __init init_pt_regs(struct pt_regs *regs)
+{
+	static unsigned long msr;
+	static bool msr_cached;
+
+	memset(regs, 0, sizeof(struct pt_regs));
+
+	if (likely(msr_cached)) {
+		regs->msr = msr;
+		return;
+	}
+
+	asm volatile("mfmsr %0" : "=r"(regs->msr));
+
+	regs->msr |= MSR_FP;
+	regs->msr |= MSR_VEC;
+	regs->msr |= MSR_VSX;
+
+	msr = regs->msr;
+	msr_cached = true;
+}
+
+static void __init show_result(char *ins, char *result)
+{
+	pr_info("%-14s : %s\n", ins, result);
+}
+
+static void __init test_ld(void)
+{
+	struct pt_regs regs;
+	unsigned long a = 0x23;
+	int stepped = -1;
+
+	init_pt_regs(&regs);
+	regs.gpr[3] = (unsigned long) &a;
+
+	/* ld r5, 0(r3) */
+	stepped = emulate_step(&regs, TEST_LD(5, 3, 0));
+
+	if (stepped == 1 && regs.gpr[5] == a)
+		show_result("ld", "PASS");
+	else
+		show_result("ld", "FAIL");
+}
+
+static void __init test_lwz(void)
+{
+	struct pt_regs regs;
+	unsigned int a = 0x4545;
+	int stepped = -1;
+
+	init_pt_regs(&regs);
+	regs.gpr[3] = (unsigned long) &a;
+
+	/* lwz r5, 0(r3) */
+	stepped = emulate_step(&regs, TEST_LWZ(5, 3, 0));
+
+	if (stepped == 1 && regs.gpr[5] == a)
+		show_result("lwz", "PASS");
+	else
+		show_result("lwz", "FAIL");
+}
+
+static void __init test_lwzx(void)
+{
+	struct pt_regs regs;
+	unsigned int a[3] = {0x0, 0x0, 0x1234};
+	int stepped = -1;
+
+	init_pt_regs(&regs);
+	regs.gpr[3] = (unsigned long) a;
+	regs.gpr[4] = 8;
+	regs.gpr[5] = 0x8765;
+
+	/* lwzx r5, r3, r4 */
+	stepped = emulate_step(&regs, TEST_LWZX(5, 3, 4));
+	if (stepped == 1 && regs.gpr[5] == a[2])
+		show_result("lwzx", "PASS");
+	else
+		show_result("lwzx", "FAIL");
+}
+
+static void __init test_std(void)
+{
+	struct pt_regs regs;
+	unsigned long a = 0x1234;
+	int stepped = -1;
+
+	init_pt_regs(&regs);
+	regs.gpr[3] = (unsigned long) &a;
+	regs.gpr[5] = 0x5678;
+
+	/* std r5, 0(r3) */
+	stepped = emulate_step(&regs, TEST_STD(5, 3, 0));
+	if (stepped == 1 || regs.gpr[5] == a)
+		show_result("std", "PASS");
+	else
+		show_result("std", "FAIL");
+}
+
+static void __init test_ldarx_stdcx(void)
+{
+	struct pt_regs regs;
+	unsigned long a = 0x1234;
+	int stepped = -1;
+	unsigned long cr0_eq = 0x1 << 29; /* eq bit of CR0 */
+
+	init_pt_regs(&regs);
+	asm volatile("mfcr %0" : "=r"(regs.ccr));
+
+
+	/*** ldarx ***/
+
+	regs.gpr[3] = (unsigned long) &a;
+	regs.gpr[4] = 0;
+	regs.gpr[5] = 0x5678;
+
+	/* ldarx r5, r3, r4, 0 */
+	stepped = emulate_step(&regs, TEST_LDARX(5, 3, 4, 0));
+
+	/*
+	 * Don't touch 'a' here. Touching 'a' can do Load/store
+	 * of 'a' which result in failure of subsequent stdcx.
+	 * Instead, use hardcoded value for comparison.
+	 */
+	if (stepped <= 0 || regs.gpr[5] != 0x1234) {
+		show_result("ldarx / stdcx.", "FAIL (ldarx)");
+		return;
+	}
+
+
+	/*** stdcx. ***/
+
+	regs.gpr[5] = 0x9ABC;
+
+	/* stdcx. r5, r3, r4 */
+	stepped = emulate_step(&regs, TEST_STDCX(5, 3, 4));
+
+	/*
+	 * Two possible scenarios that indicates successful emulation
+	 * of stdcx. :
+	 *  1. Reservation is active and store is performed. In this
+	 *     case cr0.eq bit will be set to 1.
+	 *  2. Reservation is not active and store is not performed.
+	 *     In this case cr0.eq bit will be set to 0.
+	 */
+	if (stepped == 1 && ((regs.gpr[5] == a && (regs.ccr & cr0_eq))
+			|| (regs.gpr[5] != a && !(regs.ccr & cr0_eq))))
+		show_result("ldarx / stdcx.", "PASS");
+	else
+		show_result("ldarx / stdcx.", "FAIL (stdcx.)");
+}
+
+#ifdef CONFIG_PPC_FPU
+static void __init test_lfsx_stfsx(void)
+{
+	struct pt_regs regs;
+	union {
+		float a;
+		int b;
+	} c;
+	int cached_b;
+	int stepped = -1;
+
+	init_pt_regs(&regs);
+
+
+	/*** lfsx ***/
+
+	c.a = 123.45;
+	cached_b = c.b;
+
+	regs.gpr[3] = (unsigned long) &c.a;
+	regs.gpr[4] = 0;
+
+	/* lfsx frt10, r3, r4 */
+	stepped = emulate_step(&regs, TEST_LFSX(10, 3, 4));
+
+	if (stepped == 1)
+		show_result("lfsx", "PASS");
+	else
+		show_result("lfsx", "FAIL");
+
+
+	/*** stfsx ***/
+
+	c.a = 678.91;
+
+	/* stfsx frs10, r3, r4 */
+	stepped = emulate_step(&regs, TEST_STFSX(10, 3, 4));
+
+	if (stepped == 1 && c.b == cached_b)
+		show_result("stfsx", "PASS");
+	else
+		show_result("stfsx", "FAIL");
+}
+
+static void __init test_lfdx_stfdx(void)
+{
+	struct pt_regs regs;
+	union {
+		double a;
+		long b;
+	} c;
+	long cached_b;
+	int stepped = -1;
+
+	init_pt_regs(&regs);
+
+
+	/*** lfdx ***/
+
+	c.a = 123456.78;
+	cached_b = c.b;
+
+	regs.gpr[3] = (unsigned long) &c.a;
+	regs.gpr[4] = 0;
+
+	/* lfdx frt10, r3, r4 */
+	stepped = emulate_step(&regs, TEST_LFDX(10, 3, 4));
+
+	if (stepped == 1)
+		show_result("lfdx", "PASS");
+	else
+		show_result("lfdx", "FAIL");
+
+
+	/*** stfdx ***/
+
+	c.a = 987654.32;
+
+	/* stfdx frs10, r3, r4 */
+	stepped = emulate_step(&regs, TEST_STFDX(10, 3, 4));
+
+	if (stepped == 1 && c.b == cached_b)
+		show_result("stfdx", "PASS");
+	else
+		show_result("stfdx", "FAIL");
+}
+#else
+static void __init test_lfsx_stfsx(void)
+{
+	show_result("lfsx", "SKIP (CONFIG_PPC_FPU is not set)");
+	show_result("stfsx", "SKIP (CONFIG_PPC_FPU is not set)");
+}
+
+static void __init test_lfdx_stfdx(void)
+{
+	show_result("lfdx", "SKIP (CONFIG_PPC_FPU is not set)");
+	show_result("stfdx", "SKIP (CONFIG_PPC_FPU is not set)");
+}
+#endif /* CONFIG_PPC_FPU */
+
+#ifdef CONFIG_ALTIVEC
+static void __init test_lvx_stvx(void)
+{
+	struct pt_regs regs;
+	union {
+		vector128 a;
+		u32 b[4];
+	} c;
+	u32 cached_b[4];
+	int stepped = -1;
+
+	init_pt_regs(&regs);
+
+
+	/*** lvx ***/
+
+	cached_b[0] = c.b[0] = 923745;
+	cached_b[1] = c.b[1] = 2139478;
+	cached_b[2] = c.b[2] = 9012;
+	cached_b[3] = c.b[3] = 982134;
+
+	regs.gpr[3] = (unsigned long) &c.a;
+	regs.gpr[4] = 0;
+
+	/* lvx vrt10, r3, r4 */
+	stepped = emulate_step(&regs, TEST_LVX(10, 3, 4));
+
+	if (stepped == 1)
+		show_result("lvx", "PASS");
+	else
+		show_result("lvx", "FAIL");
+
+
+	/*** stvx ***/
+
+	c.b[0] = 4987513;
+	c.b[1] = 84313948;
+	c.b[2] = 71;
+	c.b[3] = 498532;
+
+	/* stvx vrs10, r3, r4 */
+	stepped = emulate_step(&regs, TEST_STVX(10, 3, 4));
+
+	if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
+	    cached_b[2] == c.b[2] && cached_b[3] == c.b[3])
+		show_result("stvx", "PASS");
+	else
+		show_result("stvx", "FAIL");
+}
+#else
+static void __init test_lvx_stvx(void)
+{
+	show_result("lvx", "SKIP (CONFIG_ALTIVEC is not set)");
+	show_result("stvx", "SKIP (CONFIG_ALTIVEC is not set)");
+}
+#endif /* CONFIG_ALTIVEC */
+
+#ifdef CONFIG_VSX
+static void __init test_lxvd2x_stxvd2x(void)
+{
+	struct pt_regs regs;
+	union {
+		vector128 a;
+		u32 b[4];
+	} c;
+	u32 cached_b[4];
+	int stepped = -1;
+
+	init_pt_regs(&regs);
+
+
+	/*** lxvd2x ***/
+
+	cached_b[0] = c.b[0] = 18233;
+	cached_b[1] = c.b[1] = 34863571;
+	cached_b[2] = c.b[2] = 834;
+	cached_b[3] = c.b[3] = 6138911;
+
+	regs.gpr[3] = (unsigned long) &c.a;
+	regs.gpr[4] = 0;
+
+	/* lxvd2x vsr39, r3, r4 */
+	stepped = emulate_step(&regs, TEST_LXVD2X(39, 3, 4));
+
+	if (stepped == 1)
+		show_result("lxvd2x", "PASS");
+	else
+		show_result("lxvd2x", "FAIL");
+
+
+	/*** stxvd2x ***/
+
+	c.b[0] = 21379463;
+	c.b[1] = 87;
+	c.b[2] = 374234;
+	c.b[3] = 4;
+
+	/* stxvd2x vsr39, r3, r4 */
+	stepped = emulate_step(&regs, TEST_STXVD2X(39, 3, 4));
+
+	if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
+	    cached_b[2] == c.b[2] && cached_b[3] == c.b[3])
+		show_result("stxvd2x", "PASS");
+	else
+		show_result("stxvd2x", "FAIL");
+}
+#else
+static void __init test_lxvd2x_stxvd2x(void)
+{
+	show_result("lxvd2x", "SKIP (CONFIG_VSX is not set)");
+	show_result("stxvd2x", "SKIP (CONFIG_VSX is not set)");
+}
+#endif /* CONFIG_VSX */
+
+static int __init test_emulate_step(void)
+{
+	test_ld();
+	test_lwz();
+	test_lwzx();
+	test_std();
+	test_ldarx_stdcx();
+	test_lfsx_stfsx();
+	test_lfdx_stfdx();
+	test_lvx_stvx();
+	test_lxvd2x_stxvd2x();
+
+	return 0;
+}
+late_initcall(test_emulate_step);

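The late_initcall above runs automatically on kernels built with CONFIG_KPROBES_SANITY_TEST=y (which also pulls the file in via the lib/Makefile hunk earlier). Given the pr_fmt prefix and the show_result() format string in this file, each test should log one dmesg line of roughly this shape (illustrative excerpt; spacing comes from the %-14s specifier):

	emulate_step_test: ld             : PASS
	emulate_step_test: lwz            : PASS
	emulate_step_test: lvx            : SKIP (CONFIG_ALTIVEC is not set)
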
+ 30 - 6
arch/powerpc/mm/init_64.c

@@ -356,18 +356,42 @@ static void early_check_vec5(void)
 	unsigned long root, chosen;
 	int size;
 	const u8 *vec5;
+	u8 mmu_supported;
 
 	root = of_get_flat_dt_root();
 	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
-	if (chosen == -FDT_ERR_NOTFOUND)
+	if (chosen == -FDT_ERR_NOTFOUND) {
+		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
 		return;
+	}
 	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
-	if (!vec5)
+	if (!vec5) {
+		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
 		return;
-	if (size <= OV5_INDX(OV5_MMU_RADIX_300) ||
-	    !(vec5[OV5_INDX(OV5_MMU_RADIX_300)] & OV5_FEAT(OV5_MMU_RADIX_300)))
-		/* Hypervisor doesn't support radix */
+	}
+	if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
 		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+		return;
+	}
+
+	/* Check for supported configuration */
+	mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
+			OV5_FEAT(OV5_MMU_SUPPORT);
+	if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
+		/* Hypervisor only supports radix - check enabled && GTSE */
+		if (!early_radix_enabled()) {
+			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
+		}
+		if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
+						OV5_FEAT(OV5_RADIX_GTSE))) {
+			pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
+		}
+		/* Do radix anyway - the hypervisor said we had to */
+		cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
+	} else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
+		/* Hypervisor only supports hash - disable radix */
+		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+	}
 }
 
 void __init mmu_early_init_devtree(void)
@@ -383,7 +407,7 @@ void __init mmu_early_init_devtree(void)
 	 * even though the ibm,architecture-vec-5 property created by
 	 * skiboot doesn't have the necessary bits set.
 	 */
-	if (early_radix_enabled() && !(mfmsr() & MSR_HV))
+	if (!(mfmsr() & MSR_HV))
 		early_check_vec5();
 
 	if (early_radix_enabled())

+ 4 - 0
arch/powerpc/mm/pgtable-radix.c

@@ -186,6 +186,10 @@ static void __init radix_init_pgtable(void)
 	 */
 	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
 	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
+	asm volatile("ptesync" : : : "memory");
+	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
+		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
+	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 }
 
 static void __init radix_init_partition_table(void)

+ 2 - 2
arch/powerpc/platforms/powernv/opal-wrappers.S

@@ -39,8 +39,8 @@ opal_tracepoint_refcount:
 BEGIN_FTR_SECTION;						\
 	b	1f;						\
 END_FTR_SECTION(0, 1);						\
-	ld	r12,opal_tracepoint_refcount@toc(r2);		\
-	cmpdi	r12,0;						\
+	ld	r11,opal_tracepoint_refcount@toc(r2);		\
+	cmpdi	r11,0;						\
 	bne-	LABEL;						\
 1:
 

+ 10 - 0
arch/powerpc/sysdev/xics/icp-opal.c

@@ -91,6 +91,16 @@ static unsigned int icp_opal_get_irq(void)
 
 static void icp_opal_set_cpu_priority(unsigned char cppr)
 {
+	/*
+	 * Here be dragons. The caller has asked to allow only IPI's and not
+	 * external interrupts. But OPAL XIVE doesn't support that. So instead
+	 * of allowing no interrupts allow all. That's still not right, but
+	 * currently the only caller who does this is xics_migrate_irqs_away()
+	 * and it works in that case.
+	 */
+	if (cppr >= DEFAULT_PRIORITY)
+		cppr = LOWEST_PRIORITY;
+
 	xics_set_base_cppr(cppr);
 	opal_int_set_cppr(cppr);
 	iosync();

+ 14 - 3
arch/powerpc/sysdev/xics/xics-common.c

@@ -20,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/delay.h>
 
 #include <asm/prom.h>
 #include <asm/io.h>
@@ -198,9 +199,6 @@ void xics_migrate_irqs_away(void)
 	/* Remove ourselves from the global interrupt queue */
 	xics_set_cpu_giq(xics_default_distrib_server, 0);
 
-	/* Allow IPIs again... */
-	icp_ops->set_priority(DEFAULT_PRIORITY);
-
 	for_each_irq_desc(virq, desc) {
 		struct irq_chip *chip;
 		long server;
@@ -255,6 +253,19 @@ void xics_migrate_irqs_away(void)
 unlock:
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
+
+	/* Allow "sufficient" time to drop any inflight IRQ's */
+	mdelay(5);
+
+	/*
+	 * Allow IPIs again. This is done at the very end, after migrating all
+	 * interrupts, the expectation is that we'll only get woken up by an IPI
+	 * interrupt beyond this point, but leave externals masked just to be
+	 * safe. If we're using icp-opal this may actually allow all
+	 * interrupts anyway, but that should be OK.
+	 */
+	icp_ops->set_priority(DEFAULT_PRIORITY);
+
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 

+ 1 - 1
arch/s390/configs/default_defconfig

@@ -609,7 +609,7 @@ CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
+CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y

+ 1 - 1
arch/s390/configs/gcov_defconfig

@@ -560,7 +560,7 @@ CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
+CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y

+ 1 - 1
arch/s390/configs/performance_defconfig

@@ -558,7 +558,7 @@ CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
+CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y

+ 1 - 1
arch/s390/defconfig

@@ -179,7 +179,7 @@ CONFIG_FTRACE_SYSCALLS=y
 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
+CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_KPROBES_SANITY_TEST=y

+ 1 - 0
arch/x86/configs/x86_64_defconfig

@@ -176,6 +176,7 @@ CONFIG_E1000E=y
 CONFIG_SKY2=y
 CONFIG_FORCEDETH=y
 CONFIG_8139TOO=y
+CONFIG_R8169=y
 CONFIG_FDDI=y
 CONFIG_INPUT_POLLDEV=y
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set

+ 1 - 1
arch/x86/events/amd/core.c

@@ -604,7 +604,7 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
 			return &amd_f15_PMC20;
 		}
 	case AMD_EVENT_NB:
-		/* moved to perf_event_amd_uncore.c */
+		/* moved to uncore.c */
 		return &emptyconstraint;
 	default:
 		return &emptyconstraint;

+ 1 - 1
arch/x86/events/intel/cstate.c

@@ -1,5 +1,5 @@
 /*
- * perf_event_intel_cstate.c: support cstate residency counters
+ * Support cstate residency counters
  *
  * Copyright (C) 2015, Intel Corp.
  * Author: Kan Liang (kan.liang@intel.com)

+ 1 - 1
arch/x86/events/intel/rapl.c

@@ -1,5 +1,5 @@
 /*
- * perf_event_intel_rapl.c: support Intel RAPL energy consumption counters
+ * Support Intel RAPL energy consumption counters
  * Copyright (C) 2013 Google, Inc., Stephane Eranian
  *
  * Intel RAPL interface is specified in the IA-32 Manual Vol3b

+ 3 - 3
arch/x86/events/intel/uncore.h

@@ -360,7 +360,7 @@ extern struct list_head pci2phy_map_head;
 extern struct pci_extra_dev *uncore_extra_pci_dev;
 extern struct event_constraint uncore_constraint_empty;
 
-/* perf_event_intel_uncore_snb.c */
+/* uncore_snb.c */
 int snb_uncore_pci_init(void);
 int ivb_uncore_pci_init(void);
 int hsw_uncore_pci_init(void);
@@ -371,7 +371,7 @@ void nhm_uncore_cpu_init(void);
 void skl_uncore_cpu_init(void);
 int snb_pci2phy_map_init(int devid);
 
-/* perf_event_intel_uncore_snbep.c */
+/* uncore_snbep.c */
 int snbep_uncore_pci_init(void);
 void snbep_uncore_cpu_init(void);
 int ivbep_uncore_pci_init(void);
@@ -385,5 +385,5 @@ void knl_uncore_cpu_init(void);
 int skx_uncore_pci_init(void);
 void skx_uncore_cpu_init(void);
 
-/* perf_event_intel_uncore_nhmex.c */
+/* uncore_nhmex.c */
 void nhmex_uncore_cpu_init(void);

+ 1 - 1
arch/x86/hyperv/hv_init.c

@@ -158,13 +158,13 @@ void hyperv_init(void)
 		clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
 		return;
 	}
+register_msr_cs:
 #endif
 	/*
 	 * For 32 bit guests just use the MSR based mechanism for reading
 	 * the partition counter.
 	 */
 
-register_msr_cs:
 	hyperv_cs = &hyperv_cs_msr;
 	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
 		clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);

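Moving the label, not the code, is the whole fix here; the likely motivation (an assumption on my part, consistent with this merge carrying a build fix) is that a label sitting outside the #ifdef while its only goto sits inside it triggers a "label defined but not used" warning on 32-bit builds. A userspace reduction of the pattern:

#include <stdio.h>

#define HAVE_TSC_PAGE 1	/* set to 0 to mimic the 32-bit configuration */

int main(void)
{
#if HAVE_TSC_PAGE
	int tsc_page_ok = 0;	/* pretend the fast clocksource failed */

	if (tsc_page_ok) {
		puts("using TSC-page clocksource");
		return 0;
	}
	goto register_msr_cs;	/* in hv_init() the goto sits earlier */
register_msr_cs:
#endif
	/* Fallback shared by both configurations. */
	puts("using MSR-based clocksource");
	return 0;
}
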
+ 9 - 6
arch/x86/include/asm/pkeys.h

@@ -46,6 +46,15 @@ extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 static inline
 bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
 {
+	/*
+	 * "Allocated" pkeys are those that have been returned
+	 * from pkey_alloc().  pkey 0 is special, and never
+	 * returned from pkey_alloc().
+	 */
+	if (pkey <= 0)
+		return false;
+	if (pkey >= arch_max_pkey())
+		return false;
 	return mm_pkey_allocation_map(mm) & (1U << pkey);
 }
 
@@ -82,12 +91,6 @@ int mm_pkey_alloc(struct mm_struct *mm)
 static inline
 int mm_pkey_free(struct mm_struct *mm, int pkey)
 {
-	/*
-	 * pkey 0 is special, always allocated and can never
-	 * be freed.
-	 */
-	if (!pkey)
-		return -EINVAL;
 	if (!mm_pkey_is_allocated(mm, pkey))
 		return -EINVAL;
 

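A userspace view of the invariant the new bounds check encodes, assuming an x86 CPU with protection keys and glibc 2.27+ for the syscall wrappers: pkey_alloc() hands out keys starting at 1, because key 0 is the implicit default for every mapping and is never allocated or freed.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	int pkey = pkey_alloc(0, 0);	/* no flags, no access restrictions */

	if (pkey < 0) {
		perror("pkey_alloc");	/* fails on hardware without pkeys */
		return 1;
	}
	printf("allocated pkey %d; 0 is never returned\n", pkey);
	return pkey_free(pkey);
}
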
+ 1 - 1
arch/x86/include/uapi/asm/bootparam.h

@@ -58,7 +58,7 @@ struct setup_header {
 	__u32	header;
 	__u16	version;
 	__u32	realmode_swtch;
-	__u16	start_sys;
+	__u16	start_sys_seg;
 	__u16	kernel_version;
 	__u8	type_of_loader;
 	__u8	loadflags;

+ 7 - 16
arch/x86/kernel/apic/apic.c

@@ -1610,24 +1610,15 @@ static inline void try_to_enable_x2apic(int remap_mode) { }
 static inline void __x2apic_enable(void) { }
 #endif /* !CONFIG_X86_X2APIC */
 
-static int __init try_to_enable_IR(void)
-{
-#ifdef CONFIG_X86_IO_APIC
-	if (!x2apic_enabled() && skip_ioapic_setup) {
-		pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
-		return -1;
-	}
-#endif
-	return irq_remapping_enable();
-}
-
 void __init enable_IR_x2apic(void)
 {
 	unsigned long flags;
 	int ret, ir_stat;
 
-	if (skip_ioapic_setup)
+	if (skip_ioapic_setup) {
+		pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
 		return;
+	}
 
 	ir_stat = irq_remapping_prepare();
 	if (ir_stat < 0 && !x2apic_supported())
@@ -1645,7 +1636,7 @@ void __init enable_IR_x2apic(void)
 
 	/* If irq_remapping_prepare() succeeded, try to enable it */
 	if (ir_stat >= 0)
-		ir_stat = try_to_enable_IR();
+		ir_stat = irq_remapping_enable();
 	/* ir_stat contains the remap mode or an error code */
 	try_to_enable_x2apic(ir_stat);
 
@@ -2062,10 +2053,10 @@ static int allocate_logical_cpuid(int apicid)
 
 	/* Allocate a new cpuid. */
 	if (nr_logical_cpuids >= nr_cpu_ids) {
-		WARN_ONCE(1, "Only %d processors supported."
+		WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %i reached. "
 			     "Processor %d/0x%x and the rest are ignored.\n",
-			     nr_cpu_ids - 1, nr_logical_cpuids, apicid);
-		return -1;
+			     nr_cpu_ids, nr_logical_cpuids, apicid);
+		return -EINVAL;
 	}
 
 	cpuid_to_apicid[nr_logical_cpuids] = apicid;

+ 0 - 4
arch/x86/kernel/cpu/amd.c

@@ -556,10 +556,6 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-		if (check_tsc_unstable())
-			clear_sched_clock_stable();
-	} else {
-		clear_sched_clock_stable();
 	}
 
 	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */

+ 0 - 2
arch/x86/kernel/cpu/centaur.c

@@ -105,8 +105,6 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_64
 	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
 #endif
-
-	clear_sched_clock_stable();
 }
 
 static void init_centaur(struct cpuinfo_x86 *c)

+ 0 - 3
arch/x86/kernel/cpu/common.c

@@ -88,7 +88,6 @@ static void default_init(struct cpuinfo_x86 *c)
 			strcpy(c->x86_model_id, "386");
 	}
 #endif
-	clear_sched_clock_stable();
 }
 
 static const struct cpu_dev default_cpu = {
@@ -1077,8 +1076,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	 */
 	if (this_cpu->c_init)
 		this_cpu->c_init(c);
-	else
-		clear_sched_clock_stable();
 
 	/* Disable the PN if appropriate */
 	squash_the_stupid_serial_number(c);

+ 0 - 1
arch/x86/kernel/cpu/cyrix.c

@@ -185,7 +185,6 @@ static void early_init_cyrix(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
 		break;
 	}
-	clear_sched_clock_stable();
 }
 
 static void init_cyrix(struct cpuinfo_x86 *c)

+ 0 - 4
arch/x86/kernel/cpu/intel.c

@@ -162,10 +162,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-		if (check_tsc_unstable())
-			clear_sched_clock_stable();
-	} else {
-		clear_sched_clock_stable();
 	}
 
 	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */

+ 0 - 1
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c

@@ -28,7 +28,6 @@
 #include <linux/sched/signal.h>
 #include <linux/sched/task.h>
 #include <linux/slab.h>
-#include <linux/cpu.h>
 #include <linux/task_work.h>
 
 #include <uapi/linux/magic.h>

+ 0 - 2
arch/x86/kernel/cpu/transmeta.c

@@ -16,8 +16,6 @@ static void early_init_transmeta(struct cpuinfo_x86 *c)
 		if (xlvl >= 0x80860001)
 			c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001);
 	}
-
-	clear_sched_clock_stable();
 }
 
 static void init_transmeta(struct cpuinfo_x86 *c)

+ 0 - 1
arch/x86/kernel/cpu/vmware.c

@@ -30,7 +30,6 @@
 #include <asm/hypervisor.h>
 #include <asm/timer.h>
 #include <asm/apic.h>
-#include <asm/timer.h>
 
 #undef pr_fmt
 #define pr_fmt(fmt)	"vmware: " fmt

+ 1 - 1
arch/x86/kernel/hpet.c

@@ -354,7 +354,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
 
 		irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
 		irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
-		disable_irq(hdev->irq);
+		disable_hardirq(hdev->irq);
 		irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
 		enable_irq(hdev->irq);
 	}

+ 1 - 1
arch/x86/kernel/kdebugfs.c

@@ -167,7 +167,7 @@ static int __init boot_params_kdebugfs_init(void)
 	struct dentry *dbp, *version, *data;
 	int error = -ENOMEM;
 
-	dbp = debugfs_create_dir("boot_params", NULL);
+	dbp = debugfs_create_dir("boot_params", arch_debugfs_dir);
 	if (!dbp)
 		return -ENOMEM;
 

+ 1 - 1
arch/x86/kernel/kprobes/common.h

@@ -67,7 +67,7 @@
 #endif
 
 /* Ensure if the instruction can be boostable */
-extern int can_boost(kprobe_opcode_t *instruction);
+extern int can_boost(kprobe_opcode_t *instruction, void *addr);
 /* Recover instruction if given address is probed */
 extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
 					 unsigned long addr);

+ 3 - 3
arch/x86/kernel/kprobes/core.c

@@ -167,12 +167,12 @@ NOKPROBE_SYMBOL(skip_prefixes);
  * Returns non-zero if opcode is boostable.
  * RIP relative instructions are adjusted at copying time in 64 bits mode
  */
-int can_boost(kprobe_opcode_t *opcodes)
+int can_boost(kprobe_opcode_t *opcodes, void *addr)
 {
 	kprobe_opcode_t opcode;
 	kprobe_opcode_t *orig_opcodes = opcodes;
 
-	if (search_exception_tables((unsigned long)opcodes))
+	if (search_exception_tables((unsigned long)addr))
 		return 0;	/* Page fault may occur on this address. */
 
 retry:
@@ -417,7 +417,7 @@ static int arch_copy_kprobe(struct kprobe *p)
 	 * __copy_instruction can modify the displacement of the instruction,
 	 * but it doesn't affect boostable check.
 	 */
-	if (can_boost(p->ainsn.insn))
+	if (can_boost(p->ainsn.insn, p->addr))
 		p->ainsn.boostable = 0;
 	else
 		p->ainsn.boostable = -1;

+ 1 - 1
arch/x86/kernel/kprobes/opt.c

@@ -178,7 +178,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src)
 
 	while (len < RELATIVEJUMP_SIZE) {
 		ret = __copy_instruction(dest + len, src + len);
-		if (!ret || !can_boost(dest + len))
+		if (!ret || !can_boost(dest + len, src + len))
 			return -EINVAL;
 		len += ret;
 	}

+ 16 - 0
arch/x86/kernel/reboot.c

@@ -223,6 +223,22 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
 			DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
 		},
 		},
 	},
 	},
+	{	/* Handle problems with rebooting on ASUS EeeBook X205TA */
+		.callback = set_acpi_reboot,
+		.ident = "ASUS EeeBook X205TA",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X205TAW"),
+		},
+	},
+	{	/* Handle problems with rebooting on ASUS EeeBook X205TAW */
+		.callback = set_acpi_reboot,
+		.ident = "ASUS EeeBook X205TAW",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X205TAW"),
+		},
+	},
 
 
 	/* Certec */
 	/* Certec */
 	{       /* Handle problems with rebooting on Certec BPC600 */
 	{       /* Handle problems with rebooting on Certec BPC600 */

+ 23 - 12
arch/x86/kernel/tsc.c

@@ -327,9 +327,16 @@ unsigned long long sched_clock(void)
 {
 	return paravirt_sched_clock();
 }
+
+static inline bool using_native_sched_clock(void)
+{
+	return pv_time_ops.sched_clock == native_sched_clock;
+}
 #else
 unsigned long long
 sched_clock(void) __attribute__((alias("native_sched_clock")));
+
+static inline bool using_native_sched_clock(void) { return true; }
 #endif
 
 int check_tsc_unstable(void)
@@ -1112,8 +1119,10 @@ static void tsc_cs_mark_unstable(struct clocksource *cs)
 {
 	if (tsc_unstable)
 		return;
+
 	tsc_unstable = 1;
-	clear_sched_clock_stable();
+	if (using_native_sched_clock())
+		clear_sched_clock_stable();
 	disable_sched_clock_irqtime();
 	pr_info("Marking TSC unstable due to clocksource watchdog\n");
 }
@@ -1135,18 +1144,20 @@ static struct clocksource clocksource_tsc = {
 
 void mark_tsc_unstable(char *reason)
 {
-	if (!tsc_unstable) {
-		tsc_unstable = 1;
+	if (tsc_unstable)
+		return;
+
+	tsc_unstable = 1;
+	if (using_native_sched_clock())
 		clear_sched_clock_stable();
-		disable_sched_clock_irqtime();
-		pr_info("Marking TSC unstable due to %s\n", reason);
-		/* Change only the rating, when not registered */
-		if (clocksource_tsc.mult)
-			clocksource_mark_unstable(&clocksource_tsc);
-		else {
-			clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
-			clocksource_tsc.rating = 0;
-		}
+	disable_sched_clock_irqtime();
+	pr_info("Marking TSC unstable due to %s\n", reason);
+	/* Change only the rating, when not registered */
+	if (clocksource_tsc.mult) {
+		clocksource_mark_unstable(&clocksource_tsc);
+	} else {
+		clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
+		clocksource_tsc.rating = 0;
 	}
 }
 

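A userspace reduction of using_native_sched_clock(); the ops plumbing below is invented for illustration, but the comparison is the one the patch adds: under paravirt, sched_clock is a function pointer, and the stable flag describes only the native TSC implementation, so it is cleared only while the pointer still targets the native function.

#include <stdio.h>

static unsigned long long native_sched_clock(void) { return 42; }

struct pv_time_ops {
	unsigned long long (*sched_clock)(void);
};

/* A hypervisor would overwrite this pointer with its own clock. */
static struct pv_time_ops pv_time_ops = { .sched_clock = native_sched_clock };

static int using_native_sched_clock(void)
{
	return pv_time_ops.sched_clock == native_sched_clock;
}

int main(void)
{
	if (using_native_sched_clock())
		puts("TSC went unstable: clear the sched_clock stable flag");
	else
		puts("paravirt clock in use: its stability is not ours to clear");
	return 0;
}
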
+ 9 - 0
arch/x86/pci/common.c

@@ -735,6 +735,15 @@ void pcibios_disable_device (struct pci_dev *dev)
 		pcibios_disable_irq(dev);
 		pcibios_disable_irq(dev);
 }
 }
 
 
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+void pcibios_release_device(struct pci_dev *dev)
+{
+	if (atomic_dec_return(&dev->enable_cnt) >= 0)
+		pcibios_disable_device(dev);
+
+}
+#endif
+
 int pci_ext_cfg_avail(void)
 int pci_ext_cfg_avail(void)
 {
 {
 	if (raw_pci_ext_ops)
 	if (raw_pci_ext_ops)

+ 0 - 1
arch/x86/platform/uv/tlb_uv.c

@@ -1847,7 +1847,6 @@ static void pq_init(int node, int pnode)
 
 
 	ops.write_payload_first(pnode, first);
 	ops.write_payload_first(pnode, first);
 	ops.write_payload_last(pnode, last);
 	ops.write_payload_last(pnode, last);
-	ops.write_g_sw_ack(pnode, 0xffffUL);
 
 
 	/* in effect, all msg_type's are set to MSG_NOOP */
 	/* in effect, all msg_type's are set to MSG_NOOP */
 	memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
 	memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);

+ 6 - 5
arch/x86/purgatory/purgatory.c

@@ -11,6 +11,7 @@
  */
 
 #include "sha256.h"
+#include "purgatory.h"
 #include "../boot/string.h"
 
 struct sha_region {
@@ -18,11 +19,11 @@ struct sha_region {
 	unsigned long len;
 };
 
-unsigned long backup_dest = 0;
-unsigned long backup_src = 0;
-unsigned long backup_sz = 0;
+static unsigned long backup_dest;
+static unsigned long backup_src;
+static unsigned long backup_sz;
 
-u8 sha256_digest[SHA256_DIGEST_SIZE] = { 0 };
+static u8 sha256_digest[SHA256_DIGEST_SIZE] = { 0 };
 
 struct sha_region sha_regions[16] = {};
 
@@ -39,7 +40,7 @@ static int copy_backup_region(void)
 	return 0;
 }
 
-int verify_sha256_digest(void)
+static int verify_sha256_digest(void)
 {
 	struct sha_region *ptr, *end;
 	u8 digest[SHA256_DIGEST_SIZE];

+ 8 - 0
arch/x86/purgatory/purgatory.h

@@ -0,0 +1,8 @@
+#ifndef PURGATORY_H
+#define PURGATORY_H
+
+#ifndef __ASSEMBLY__
+extern void purgatory(void);
+#endif	/* __ASSEMBLY__ */
+
+#endif /* PURGATORY_H */

+ 1 - 0
arch/x86/purgatory/setup-x86_64.S

@@ -9,6 +9,7 @@
  * This source code is licensed under the GNU General Public License,
  * Version 2.  See the file COPYING for more details.
  */
+#include "purgatory.h"
 
 	.text
 	.globl purgatory_start

+ 2 - 0
drivers/acpi/internal.h

@@ -41,8 +41,10 @@ void acpi_gpe_apply_masked_gpes(void);
 void acpi_container_init(void);
 void acpi_memory_hotplug_init(void);
 #ifdef	CONFIG_ACPI_HOTPLUG_IOAPIC
+void pci_ioapic_remove(struct acpi_pci_root *root);
 int acpi_ioapic_remove(struct acpi_pci_root *root);
 #else
+static inline void pci_ioapic_remove(struct acpi_pci_root *root) { return; }
 static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; }
 #endif
 #ifdef CONFIG_ACPI_DOCK

+ 16 - 6
drivers/acpi/ioapic.c

@@ -206,24 +206,34 @@ int acpi_ioapic_add(acpi_handle root_handle)
 	return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV;
 }
 
-int acpi_ioapic_remove(struct acpi_pci_root *root)
+void pci_ioapic_remove(struct acpi_pci_root *root)
 {
-	int retval = 0;
 	struct acpi_pci_ioapic *ioapic, *tmp;
 
 	mutex_lock(&ioapic_list_lock);
 	list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) {
 		if (root->device->handle != ioapic->root_handle)
 			continue;
-
-		if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base))
-			retval = -EBUSY;
-
 		if (ioapic->pdev) {
 			pci_release_region(ioapic->pdev, 0);
 			pci_disable_device(ioapic->pdev);
 			pci_dev_put(ioapic->pdev);
 		}
+	}
+	mutex_unlock(&ioapic_list_lock);
+}
+
+int acpi_ioapic_remove(struct acpi_pci_root *root)
+{
+	int retval = 0;
+	struct acpi_pci_ioapic *ioapic, *tmp;
+
+	mutex_lock(&ioapic_list_lock);
+	list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) {
+		if (root->device->handle != ioapic->root_handle)
+			continue;
+		if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base))
+			retval = -EBUSY;
 		if (ioapic->res.flags && ioapic->res.parent)
 			release_resource(&ioapic->res);
 		list_del(&ioapic->list);

+ 2 - 2
drivers/acpi/pci_root.c

@@ -648,12 +648,12 @@ static void acpi_pci_root_remove(struct acpi_device *device)
 
 	pci_stop_root_bus(root->bus);
 
-	WARN_ON(acpi_ioapic_remove(root));
-
+	pci_ioapic_remove(root);
 	device_set_run_wake(root->bus->bridge, false);
 	pci_acpi_remove_bus_pm_notifier(device);
 
 	pci_remove_root_bus(root->bus);
+	WARN_ON(acpi_ioapic_remove(root));
 
 	dmar_device_remove(device->handle);
 

+ 1 - 1
drivers/char/nwbutton.c

@@ -6,7 +6,7 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
 #include <linux/interrupt.h>
 #include <linux/time.h>
 #include <linux/timer.h>

+ 1 - 0
drivers/firmware/efi/arm-runtime.c

@@ -65,6 +65,7 @@ static bool __init efi_virtmap_init(void)
 	bool systab_found;
 
 	efi_mm.pgd = pgd_alloc(&efi_mm);
+	mm_init_cpumask(&efi_mm);
 	init_new_context(NULL, &efi_mm);
 
 	systab_found = false;

+ 2 - 2
drivers/firmware/efi/libstub/secureboot.c

@@ -45,6 +45,8 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
 	size = sizeof(secboot);
 	status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid,
 			     NULL, &size, &secboot);
+	if (status == EFI_NOT_FOUND)
+		return efi_secureboot_mode_disabled;
 	if (status != EFI_SUCCESS)
 		goto out_efi_err;
 
@@ -78,7 +80,5 @@ secure_boot_enabled:
 
 out_efi_err:
 	pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n");
-	if (status == EFI_NOT_FOUND)
-		return efi_secureboot_mode_disabled;
 	return efi_secureboot_mode_unknown;
 }

+ 2 - 1
drivers/irqchip/irq-crossbar.c

@@ -198,7 +198,8 @@ static const struct irq_domain_ops crossbar_domain_ops = {
 
 static int __init crossbar_of_init(struct device_node *node)
 {
-	int i, size, max = 0, reserved = 0, entry;
+	int i, size, reserved = 0;
+	u32 max = 0, entry;
 	const __be32 *irqsr;
 	int ret = -ENOMEM;
 

+ 47 - 0
drivers/xen/swiotlb-xen.c

@@ -681,3 +681,50 @@ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
+
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ * This function should be called with the pages from the current domain only,
+ * passing pages mapped from other domains would lead to memory corruption.
+ */
+int
+xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		     unsigned long attrs)
+{
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+	if (__generic_dma_ops(dev)->mmap)
+		return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr,
+						    dma_addr, size, attrs);
+#endif
+	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
+
+/*
+ * This function should be called with the pages from the current domain only,
+ * passing pages mapped from other domains would lead to memory corruption.
+ */
+int
+xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+			void *cpu_addr, dma_addr_t handle, size_t size,
+			unsigned long attrs)
+{
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+	if (__generic_dma_ops(dev)->get_sgtable) {
+#if 0
+	/*
+	 * This check verifies that the page belongs to the current domain and
+	 * is not one mapped from another domain.
+	 * This check is for debug only, and should not go to production build
+	 */
+		unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
+		BUG_ON (!page_is_ram(bfn));
+#endif
+		return __generic_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
+							   handle, size, attrs);
+	}
+#endif
+	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_get_sgtable);

+ 4 - 4
fs/timerfd.c

@@ -400,9 +400,9 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
 	     clockid != CLOCK_BOOTTIME_ALARM))
 		return -EINVAL;
 
-	if (!capable(CAP_WAKE_ALARM) &&
-	    (clockid == CLOCK_REALTIME_ALARM ||
-	     clockid == CLOCK_BOOTTIME_ALARM))
+	if ((clockid == CLOCK_REALTIME_ALARM ||
+	     clockid == CLOCK_BOOTTIME_ALARM) &&
+	    !capable(CAP_WAKE_ALARM))
 		return -EPERM;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -449,7 +449,7 @@ static int do_timerfd_settime(int ufd, int flags,
 		return ret;
 	ctx = f.file->private_data;
 
-	if (!capable(CAP_WAKE_ALARM) && isalarm(ctx)) {
+	if (isalarm(ctx) && !capable(CAP_WAKE_ALARM)) {
 		fdput(f);
 		return -EPERM;
 	}

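The timerfd hunks only swap the operands of &&, which matters because of short-circuit evaluation; the usual rationale for this kind of swap (an assumption here, not stated in the diff) is that capable() has side effects such as audit logging and privilege-use accounting, so it should run only when the cheap condition already holds. A userspace analogue:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for capable(): note the observable side effect. */
static bool privileged_check(void)
{
	puts("audit: capability was consulted");
	return true;
}

int main(void)
{
	bool needs_privilege = false;	/* e.g. not an *_ALARM clockid */

	/* Cheap condition first: the noisy check never runs here. */
	if (needs_privilege && !privileged_check())
		return 1;
	puts("plain clockid: no capability check, no audit noise");
	return 0;
}
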
+ 9 - 2
include/linux/jump_label.h

@@ -90,6 +90,13 @@ extern bool static_key_initialized;
 struct static_key {
 	atomic_t enabled;
 /*
+ * Note:
+ *   To make anonymous unions work with old compilers, the static
+ *   initialization of them requires brackets. This creates a dependency
+ *   on the order of the struct with the initializers. If any fields
+ *   are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need
+ *   to be modified.
+ *
  * bit 0 => 1 if key is initially true
  *	    0 if initially false
  * bit 1 => 1 if points to struct static_key_mod
@@ -166,10 +173,10 @@ extern void static_key_disable(struct static_key *key);
  */
 #define STATIC_KEY_INIT_TRUE					\
 	{ .enabled = { 1 },					\
-	  .entries = (void *)JUMP_TYPE_TRUE }
+	  { .entries = (void *)JUMP_TYPE_TRUE } }
 #define STATIC_KEY_INIT_FALSE					\
 	{ .enabled = { 0 },					\
-	  .entries = (void *)JUMP_TYPE_FALSE }
+	  { .entries = (void *)JUMP_TYPE_FALSE } }
 
 #else  /* !HAVE_JUMP_LABEL */
 

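The extra braces are easy to miss: entries now lives in an anonymous union, and as the new comment says, older compilers only accept a designated initializer for an anonymous union member when the union gets its own brace level. A standalone illustration that compiles with the same initializer shape:

#include <stdio.h>

struct key {
	int enabled;
	union {			/* anonymous, like static_key::entries */
		void *entries;
		unsigned long type;
	};
};

/* The inner braces are the point; without them old GCC rejects this. */
static struct key on = { .enabled = 1, { .entries = (void *)1UL } };

int main(void)
{
	printf("enabled=%d entries=%p\n", on.enabled, on.entries);
	return 0;
}
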
+ 1 - 1
include/linux/user_namespace.h

@@ -72,7 +72,7 @@ struct ucounts {
 	struct hlist_node node;
 	struct user_namespace *ns;
 	kuid_t uid;
-	atomic_t count;
+	int count;
 	atomic_t ucount[UCOUNT_COUNTS];
 };
 

+ 1 - 0
include/trace/events/syscalls.h

@@ -1,5 +1,6 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM raw_syscalls
+#undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE syscalls
 
 #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)

+ 11 - 0
include/xen/swiotlb-xen.h

@@ -2,6 +2,7 @@
 #define __LINUX_SWIOTLB_XEN_H
 
 #include <linux/dma-direction.h>
+#include <linux/scatterlist.h>
 #include <linux/swiotlb.h>
 
 extern int xen_swiotlb_init(int verbose, bool early);
@@ -55,4 +56,14 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
 
 extern int
 xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
+
+extern int
+xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		     unsigned long attrs);
+
+extern int
+xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+			void *cpu_addr, dma_addr_t handle, size_t size,
+			unsigned long attrs);
 #endif /* __LINUX_SWIOTLB_XEN_H */

+ 9 - 2
kernel/locking/lockdep.c

@@ -3262,10 +3262,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (depth) {
 		hlock = curr->held_locks + depth - 1;
 		if (hlock->class_idx == class_idx && nest_lock) {
-			if (hlock->references)
+			if (hlock->references) {
+				/*
+				 * Check: unsigned int references:12, overflow.
+				 */
+				if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
+					return 0;
+
 				hlock->references++;
-			else
+			} else {
 				hlock->references = 2;
+			}
 
 			return 1;
 		}

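The magic number (1 << 12) - 1 comes from held_lock::references being a 12-bit bitfield; without the guard the increment silently wraps to zero and corrupts the nesting count. The width below mirrors the lockdep struct, everything else is a toy:

#include <stdio.h>

struct held_lock_toy {
	unsigned int references:12;	/* same width as in lockdep */
};

int main(void)
{
	struct held_lock_toy h = { .references = (1 << 12) - 1 };

	printf("before: %u\n", h.references);	/* 4095 */
	h.references++;				/* silently wraps */
	printf("after:  %u\n", h.references);	/* 0 */
	return 0;
}
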
+ 3 - 3
kernel/locking/test-ww_mutex.c

@@ -50,7 +50,7 @@ static void test_mutex_work(struct work_struct *work)
 
 	if (mtx->flags & TEST_MTX_TRY) {
 		while (!ww_mutex_trylock(&mtx->mutex))
-			cpu_relax();
+			cond_resched();
 	} else {
 		ww_mutex_lock(&mtx->mutex, NULL);
 	}
@@ -88,7 +88,7 @@ static int __test_mutex(unsigned int flags)
 				ret = -EINVAL;
 				break;
 			}
-			cpu_relax();
+			cond_resched();
 		} while (time_before(jiffies, timeout));
 	} else {
 		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
@@ -627,7 +627,7 @@ static int __init test_ww_mutex_init(void)
 	if (ret)
 		return ret;
 
-	ret = stress(4096, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL);
+	ret = stress(4095, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL);
 	if (ret)
 		return ret;
 

+ 8 - 3
kernel/sched/core.c

@@ -3287,10 +3287,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	struct task_struct *p;
 
 	/*
-	 * Optimization: we know that if all tasks are in
-	 * the fair class we can call that function directly:
+	 * Optimization: we know that if all tasks are in the fair class we can
+	 * call that function directly, but only if the @prev task wasn't of a
+	 * higher scheduling class, because otherwise those lose the
+	 * opportunity to pull in more work from other CPUs.
 	 */
-	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
+	if (likely((prev->sched_class == &idle_sched_class ||
+		    prev->sched_class == &fair_sched_class) &&
+		   rq->nr_running == rq->cfs.h_nr_running)) {
+
 		p = fair_sched_class.pick_next_task(rq, prev, rf);
 		if (unlikely(p == RETRY_TASK))
 			goto again;

+ 1 - 1
kernel/sched/fair.c

@@ -5799,7 +5799,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 	 * Due to large variance we need a large fuzz factor; hackbench in
 	 * particular is sensitive here.
 	 */
-	if ((avg_idle / 512) < avg_cost)
+	if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost)
 		return -1;
 
 	time = local_clock();

+ 5 - 0
kernel/sched/features.h

@@ -51,6 +51,11 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
  */
 SCHED_FEAT(TTWU_QUEUE, true)
 
+/*
+ * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
+ */
+SCHED_FEAT(SIS_AVG_CPU, false)
+
 #ifdef HAVE_RT_PUSH_IPI
 /*
  * In order to avoid a thundering herd attack of CPUs that are

+ 1 - 1
kernel/time/jiffies.c

@@ -125,7 +125,7 @@ int register_refined_jiffies(long cycles_per_second)
 	shift_hz += cycles_per_tick/2;
 	do_div(shift_hz, cycles_per_tick);
 	/* Calculate nsec_per_tick using shift_hz */
-	nsec_per_tick = (u64)TICK_NSEC << 8;
+	nsec_per_tick = (u64)NSEC_PER_SEC << 8;
 	nsec_per_tick += (u32)shift_hz/2;
 	do_div(nsec_per_tick, (u32)shift_hz);
 

+ 3 - 3
kernel/trace/Kconfig

@@ -429,7 +429,7 @@ config BLK_DEV_IO_TRACE
 
 	  If unsure, say N.
 
-config KPROBE_EVENT
+config KPROBE_EVENTS
 	depends on KPROBES
 	depends on HAVE_REGS_AND_STACK_ACCESS_API
 	bool "Enable kprobes-based dynamic events"
@@ -447,7 +447,7 @@ config KPROBE_EVENT
 	  This option is also required by perf-probe subcommand of perf tools.
 	  If you want to use perf tools, this option is strongly recommended.
 
-config UPROBE_EVENT
+config UPROBE_EVENTS
 	bool "Enable uprobes-based dynamic events"
 	depends on ARCH_SUPPORTS_UPROBES
 	depends on MMU
@@ -466,7 +466,7 @@ config UPROBE_EVENT
 
 config BPF_EVENTS
 	depends on BPF_SYSCALL
-	depends on (KPROBE_EVENT || UPROBE_EVENT) && PERF_EVENTS
+	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
 	bool
 	default y
 	help

+ 2 - 2
kernel/trace/Makefile

@@ -57,7 +57,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o
 obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o
 obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o
-obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
+obj-$(CONFIG_KPROBE_EVENTS) += trace_kprobe.o
 obj-$(CONFIG_TRACEPOINTS) += power-traces.o
 ifeq ($(CONFIG_PM),y)
 obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o
@@ -66,7 +66,7 @@ ifeq ($(CONFIG_TRACING),y)
 obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
 endif
 obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
-obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o
+obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o
 
 obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
 

+ 18 - 5
kernel/trace/ftrace.c

@@ -4416,16 +4416,24 @@ static int __init set_graph_notrace_function(char *str)
 }
 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
 
+static int __init set_graph_max_depth_function(char *str)
+{
+	if (!str)
+		return 0;
+	fgraph_max_depth = simple_strtoul(str, NULL, 0);
+	return 1;
+}
+__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
+
 static void __init set_ftrace_early_graph(char *buf, int enable)
 {
 	int ret;
 	char *func;
 	struct ftrace_hash *hash;
 
-	if (enable)
-		hash = ftrace_graph_hash;
-	else
-		hash = ftrace_graph_notrace_hash;
+	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+	if (WARN_ON(!hash))
+		return;
 
 	while (buf) {
 		func = strsep(&buf, ",");
@@ -4435,6 +4443,11 @@ static void __init set_ftrace_early_graph(char *buf, int enable)
 			printk(KERN_DEBUG "ftrace: function %s not "
 					  "traceable\n", func);
 	}
+
+	if (enable)
+		ftrace_graph_hash = hash;
+	else
+		ftrace_graph_notrace_hash = hash;
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
@@ -5488,7 +5501,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
  * Normally the mcount trampoline will call the ops->func, but there
  * are times that it should not. For example, if the ops does not
  * have its own recursion protection, then it should call the
- * ftrace_ops_recurs_func() instead.
+ * ftrace_ops_assist_func() instead.
  *
  * Returns the function that the trampoline should call for @ops.
  */

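The new __setup() hook means the graph-tracer depth limit can be pinned before any tracing starts, from the kernel command line, alongside the ftrace_graph_notrace= parameter visible above; e.g. (the value is arbitrary):

    ftrace_graph_max_depth=8

The same limit is otherwise adjusted at runtime via the max_graph_depth file in tracefs, so the boot parameter simply seeds fgraph_max_depth early.
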
+ 5 - 5
kernel/trace/trace.c

@@ -4341,22 +4341,22 @@ static const char readme_msg[] =
 	"\t\t\t  traces\n"
 #endif
 #endif /* CONFIG_STACK_TRACER */
-#ifdef CONFIG_KPROBE_EVENT
+#ifdef CONFIG_KPROBE_EVENTS
 	"  kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
 #endif
-#ifdef CONFIG_UPROBE_EVENT
+#ifdef CONFIG_UPROBE_EVENTS
 	"  uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
 #endif
-#if defined(CONFIG_KPROBE_EVENT) || defined(CONFIG_UPROBE_EVENT)
+#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
 	"\t  accepts: event-definitions (one definition per line)\n"
 	"\t   Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
 	"\t           -:[<group>/]<event>\n"
-#ifdef CONFIG_KPROBE_EVENT
+#ifdef CONFIG_KPROBE_EVENTS
 	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
 #endif
-#ifdef CONFIG_UPROBE_EVENT
+#ifdef CONFIG_UPROBE_EVENTS
 	"\t    place: <path>:<offset>\n"
 #endif
 	"\t     args: <name>=fetcharg[:type]\n"

+ 2 - 2
kernel/trace/trace_probe.h

@@ -248,7 +248,7 @@ ASSIGN_FETCH_FUNC(file_offset, ftype),			\
 #define FETCH_TYPE_STRING	0
 #define FETCH_TYPE_STRSIZE	1
 
-#ifdef CONFIG_KPROBE_EVENT
+#ifdef CONFIG_KPROBE_EVENTS
 struct symbol_cache;
 unsigned long update_symbol_cache(struct symbol_cache *sc);
 void free_symbol_cache(struct symbol_cache *sc);
@@ -278,7 +278,7 @@ alloc_symbol_cache(const char *sym, long offset)
 {
 	return NULL;
 }
-#endif /* CONFIG_KPROBE_EVENT */
+#endif /* CONFIG_KPROBE_EVENTS */
 
 struct probe_arg {
 	struct fetch_param	fetch;

+ 11 - 7
kernel/ucount.c

@@ -144,7 +144,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
 
 		new->ns = ns;
 		new->uid = uid;
-		atomic_set(&new->count, 0);
+		new->count = 0;
 
 		spin_lock_irq(&ucounts_lock);
 		ucounts = find_ucounts(ns, uid, hashent);
@@ -155,8 +155,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
 			ucounts = new;
 		}
 	}
-	if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
+	if (ucounts->count == INT_MAX)
 		ucounts = NULL;
+	else
+		ucounts->count += 1;
 	spin_unlock_irq(&ucounts_lock);
 	return ucounts;
 }
@@ -165,13 +167,15 @@ static void put_ucounts(struct ucounts *ucounts)
 {
 	unsigned long flags;
 
-	if (atomic_dec_and_test(&ucounts->count)) {
-		spin_lock_irqsave(&ucounts_lock, flags);
+	spin_lock_irqsave(&ucounts_lock, flags);
+	ucounts->count -= 1;
+	if (!ucounts->count)
 		hlist_del_init(&ucounts->node);
-		spin_unlock_irqrestore(&ucounts_lock, flags);
+	else
+		ucounts = NULL;
+	spin_unlock_irqrestore(&ucounts_lock, flags);
 
-		kfree(ucounts);
-	}
+	kfree(ucounts);
 }
 
 static inline bool atomic_inc_below(atomic_t *v, int u)

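The ucount hunks replace an atomic counter with a plain int that is only ever touched under ucounts_lock; the point (my reading of the change, not spelled out in the diff) is that "count dropped to zero" and "unlinked from the hash" become one step relative to concurrent lookups, which take the same lock. A userspace analogue with pthreads:

#include <limits.h>
#include <pthread.h>
#include <stdlib.h>

struct entry {
	int count;		/* protected by table_lock, not atomic */
	struct entry *next;	/* hash-chain link, same lock */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *get_entry(struct entry *e)
{
	pthread_mutex_lock(&table_lock);
	if (e->count == INT_MAX)	/* refuse to overflow */
		e = NULL;
	else
		e->count += 1;
	pthread_mutex_unlock(&table_lock);
	return e;
}

static void put_entry(struct entry *e)
{
	pthread_mutex_lock(&table_lock);
	e->count -= 1;
	if (e->count == 0) {
		/* unlink from the chain here, still under the lock */
	} else {
		e = NULL;	/* somebody still holds a reference */
	}
	pthread_mutex_unlock(&table_lock);
	free(e);		/* free(NULL) is a no-op */
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		return 1;
	e->count = 1;		/* creation reference */
	get_entry(e);		/* second user */
	put_entry(e);
	put_entry(e);		/* reaches zero: unlink and free */
	return 0;
}
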
+ 2 - 2
lib/radix-tree.c

@@ -2129,8 +2129,8 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
 		struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
 		if (!bitmap)
 			return 0;
-		bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap);
-		kfree(bitmap);
+		if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
+			kfree(bitmap);
 	}
 
 	return 1;

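The two-line ida_pre_get() change fixes an ownership bug: this_cpu_cmpxchg() returns the old slot value, and the previous code kfree()d that return unconditionally, which on a failed exchange freed the bitmap still installed in the per-cpu slot while leaking the fresh one. A userspace reduction using the GCC/Clang atomic builtins:

#include <stdbool.h>
#include <stdlib.h>

static void *slot;	/* stand-in for the per-cpu ida_bitmap pointer */

static bool preload(void)
{
	void *expected = NULL;
	void *fresh = malloc(128);

	if (!fresh)
		return false;
	/* Install only if empty; on failure drop *our* allocation. */
	if (!__atomic_compare_exchange_n(&slot, &expected, fresh, false,
					 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
		free(fresh);
	return true;
}

int main(void)
{
	return preload() && preload() ? 0 : 1;	/* second call hits the free path */
}
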
+ 7 - 7
lib/refcount.c

@@ -58,7 +58,7 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 		val = old;
 	}
 
-	WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
 	return true;
 }
@@ -66,7 +66,7 @@ EXPORT_SYMBOL_GPL(refcount_add_not_zero);
 
 void refcount_add(unsigned int i, refcount_t *r)
 {
-	WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_add);
 
@@ -97,7 +97,7 @@ bool refcount_inc_not_zero(refcount_t *r)
 		val = old;
 	}
 
-	WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
 	return true;
 }
@@ -111,7 +111,7 @@ EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
  */
 void refcount_inc(refcount_t *r)
 {
-	WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+	WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_inc);
 
@@ -125,7 +125,7 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 
 		new = val - i;
 		if (new > val) {
-			WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
 			return false;
 		}
 
@@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(refcount_dec_and_test);
 
 void refcount_dec(refcount_t *r)
 {
-	WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_dec);
 
@@ -204,7 +204,7 @@ bool refcount_dec_not_one(refcount_t *r)
 
 		new = val - 1;
 		if (new > val) {
-			WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
 			return true;
 		}
 

+ 2 - 0
scripts/module-common.lds

@@ -22,4 +22,6 @@ SECTIONS {
 
 	. = ALIGN(8);
 	.init_array		0 : { *(SORT(.init_array.*)) *(.init_array) }
+
+	__jump_table		0 : ALIGN(8) { KEEP(*(__jump_table)) }
 }

Some files were not shown because too many files changed in this diff.