
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:
 "Among the traditional bug fixes and cleanups are some improvements:

   - A tool to generate the facility lists; generating the bit fields
     by hand has been a source of bugs in the past

   - The spinlock loop is reordered to avoid bursts of hypervisor calls

   - Add support for the open-for-business interface to the service
     element

   - The get_cpu call is added to the vdso

   - A set of tracepoints is defined for the common I/O layer

   - The deprecated sclp_cpi module is removed

   - Update default configuration"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (56 commits)
  s390/sclp: fix possible control register corruption
  s390: fix normalization bug in exception table sorting
  s390/configs: update default configurations
  s390/vdso: optimize getcpu system call
  s390: drop smp_mb in vdso_init
  s390: rename struct _lowcore to struct lowcore
  s390/mem_detect: use unsigned longs
  s390/ptrace: get rid of long longs in psw_bits
  s390/sysinfo: add missing SYSIB 1.2.2 multithreading fields
  s390: get rid of CONFIG_SCHED_MC and CONFIG_SCHED_BOOK
  s390/Kconfig: remove pointless 64 bit dependencies
  s390/dasd: fix failfast for disconnected devices
  s390/con3270: testing return kzalloc retval
  s390/hmcdrv: constify hmcdrv_ftp_ops structs
  s390/cio: add NULL test
  s390/cio: Change I/O instructions from inline to normal functions
  s390/cio: Introduce common I/O layer tracepoints
  s390/cio: Consolidate inline assemblies and related data definitions
  s390/cio: Fix incorrect xsch opcode specification
  s390/cio: Remove unused inline assemblies
  ...
Linus Torvalds committed 9 years ago
commit cbd88cd4c0
93 changed files with 1948 additions and 1567 deletions
  1. Documentation/s390/zfcpdump.txt (+9 -13)
  2. arch/s390/Kconfig (+5 -6)
  3. arch/s390/Makefile (+9 -0)
  4. arch/s390/configs/default_defconfig (+12 -15)
  5. arch/s390/configs/gcov_defconfig (+10 -14)
  6. arch/s390/configs/performance_defconfig (+10 -14)
  7. arch/s390/configs/zfcpdump_defconfig (+2 -8)
  8. arch/s390/defconfig (+27 -3)
  9. arch/s390/include/asm/compat.h (+1 -1)
  10. arch/s390/include/asm/crw.h (+0 -14)
  11. arch/s390/include/asm/elf.h (+5 -6)
  12. arch/s390/include/asm/facilities_src.h (+58 -0)
  13. arch/s390/include/asm/facility.h (+13 -4)
  14. arch/s390/include/asm/fpu/internal.h (+1 -9)
  15. arch/s390/include/asm/ipl.h (+6 -8)
  16. arch/s390/include/asm/lowcore.h (+3 -24)
  17. arch/s390/include/asm/os_info.h (+1 -1)
  18. arch/s390/include/asm/pci_dma.h (+2 -0)
  19. arch/s390/include/asm/processor.h (+12 -0)
  20. arch/s390/include/asm/ptrace.h (+19 -19)
  21. arch/s390/include/asm/reset.h (+1 -2)
  22. arch/s390/include/asm/sclp.h (+7 -6)
  23. arch/s390/include/asm/setup.h (+14 -17)
  24. arch/s390/include/asm/smp.h (+1 -1)
  25. arch/s390/include/asm/sysinfo.h (+13 -4)
  26. arch/s390/include/asm/thread_info.h (+0 -2)
  27. arch/s390/include/asm/topology.h (+3 -3)
  28. arch/s390/include/asm/vdso.h (+4 -2)
  29. arch/s390/kernel/Makefile (+5 -3)
  30. arch/s390/kernel/asm-offsets.c (+89 -86)
  31. arch/s390/kernel/crash_dump.c (+201 -253)
  32. arch/s390/kernel/dis.c (+2 -2)
  33. arch/s390/kernel/early.c (+9 -0)
  34. arch/s390/kernel/entry.S (+2 -0)
  35. arch/s390/kernel/head.S (+17 -30)
  36. arch/s390/kernel/head64.S (+1 -1)
  37. arch/s390/kernel/ipl.c (+3 -18)
  38. arch/s390/kernel/machine_kexec.c (+61 -49)
  39. arch/s390/kernel/os_info.c (+4 -3)
  40. arch/s390/kernel/reipl.S (+51 -43)
  41. arch/s390/kernel/sclp.c (+50 -15)
  42. arch/s390/kernel/setup.c (+10 -13)
  43. arch/s390/kernel/smp.c (+80 -81)
  44. arch/s390/kernel/sysinfo.c (+10 -10)
  45. arch/s390/kernel/traps.c (+0 -3)
  46. arch/s390/kernel/vdso.c (+11 -6)
  47. arch/s390/kernel/vdso32/Makefile (+1 -1)
  48. arch/s390/kernel/vdso32/getcpu.S (+43 -0)
  49. arch/s390/kernel/vdso32/vdso32.lds.S (+1 -0)
  50. arch/s390/kernel/vdso64/Makefile (+1 -1)
  51. arch/s390/kernel/vdso64/getcpu.S (+42 -0)
  52. arch/s390/kernel/vdso64/vdso64.lds.S (+1 -0)
  53. arch/s390/kvm/interrupt.c (+2 -2)
  54. arch/s390/kvm/kvm-s390.c (+15 -15)
  55. arch/s390/kvm/priv.c (+1 -1)
  56. arch/s390/lib/spinlock.c (+32 -13)
  57. arch/s390/mm/extable.c (+6 -2)
  58. arch/s390/mm/extmem.c (+1 -3)
  59. arch/s390/mm/fault.c (+0 -2)
  60. arch/s390/mm/gup.c (+1 -0)
  61. arch/s390/mm/maccess.c (+2 -2)
  62. arch/s390/mm/mem_detect.c (+2 -5)
  63. arch/s390/pci/pci.c (+1 -2)
  64. arch/s390/pci/pci_dma.c (+14 -5)
  65. arch/s390/tools/.gitignore (+1 -0)
  66. arch/s390/tools/Makefile (+15 -0)
  67. arch/s390/tools/gen_facilities.c (+67 -0)
  68. drivers/s390/block/dasd.c (+6 -2)
  69. drivers/s390/char/Kconfig (+8 -13)
  70. drivers/s390/char/Makefile (+1 -4)
  71. drivers/s390/char/con3215.c (+2 -0)
  72. drivers/s390/char/con3270.c (+2 -0)
  73. drivers/s390/char/hmcdrv_ftp.c (+3 -3)
  74. drivers/s390/char/sclp.c (+2 -3)
  75. drivers/s390/char/sclp_config.c (+101 -1)
  76. drivers/s390/char/sclp_cpi.c (+0 -40)
  77. drivers/s390/char/zcore.c (+48 -402)
  78. drivers/s390/cio/Makefile (+4 -1)
  79. drivers/s390/cio/airq.c (+1 -0)
  80. drivers/s390/cio/chsc_sch.c (+2 -3)
  81. drivers/s390/cio/cio.c (+18 -19)
  82. drivers/s390/cio/cio.h (+12 -0)
  83. drivers/s390/cio/crw.c (+1 -0)
  84. drivers/s390/cio/css.c (+1 -1)
  85. drivers/s390/cio/device_fsm.c (+1 -1)
  86. drivers/s390/cio/io_sch.h (+0 -45)
  87. drivers/s390/cio/ioasm.c (+224 -0)
  88. drivers/s390/cio/ioasm.h (+15 -154)
  89. drivers/s390/cio/qdio_debug.c (+2 -4)
  90. drivers/s390/cio/trace.c (+24 -0)
  91. drivers/s390/cio/trace.h (+363 -0)
  92. drivers/s390/crypto/zcrypt_api.c (+2 -4)
  93. scripts/Makefile.lib (+2 -1)

+ 9 - 13
Documentation/s390/zfcpdump.txt

@@ -15,19 +15,15 @@ the s390-tools package) to make the device bootable. The operator of a Linux
 system can then trigger a SCSI dump by booting the SCSI disk, where zfcpdump
 resides on.
 
-The kernel part of zfcpdump is implemented as a debugfs file under "zcore/mem",
-which exports memory and registers of the crashed Linux in an s390
-standalone dump format. It can be used in the same way as e.g. /dev/mem. The
-dump format defines a 4K header followed by plain uncompressed memory. The
-register sets are stored in the prefix pages of the respective CPUs. To build a
-dump enabled kernel with the zcore driver, the kernel config option
-CONFIG_CRASH_DUMP has to be set. When reading from "zcore/mem", the part of
-memory, which has been saved by hardware is read by the driver via the SCLP
-hardware interface. The second part is just copied from the non overwritten real
-memory.
-
-Since kernel version 3.12 also the /proc/vmcore file can also be used to access
-the dump.
+The user space dump tool accesses the memory of the crashed system by means
+of the /proc/vmcore interface. This interface exports the crashed system's
+memory and registers in ELF core dump format. To access the memory which has
+been saved by the hardware SCLP requests will be created at the time the data
+is needed by /proc/vmcore. The tail part of the crashed systems memory which
+has not been stashed by hardware can just be copied from real memory.
+
+To build a dump enabled kernel the kernel config option CONFIG_CRASH_DUMP
+has to be set.
 
 To get a valid zfcpdump kernel configuration use "make zfcpdump_defconfig".
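
The rewritten documentation above describes /proc/vmcore as an ELF core dump
of the crashed system, with the HSA-saved part fetched through SCLP requests
on demand. As a hedged illustration (not part of this patch; the file name and
error handling are only for the example), a user-space tool can treat it like
any other ELF core file:

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	Elf64_Ehdr ehdr;
	int fd = open("/proc/vmcore", O_RDONLY);

	if (fd < 0) {
		perror("open /proc/vmcore");
		return 1;
	}
	/* The dump is exposed as a regular ELF core file: an ELF header,
	 * program headers describing the old memory ranges, and note
	 * segments carrying the per-CPU register sets. */
	if (read(fd, &ehdr, sizeof(ehdr)) == (ssize_t) sizeof(ehdr) &&
	    memcmp(ehdr.e_ident, ELFMAG, SELFMAG) == 0 &&
	    ehdr.e_type == ET_CORE)
		printf("ELF core dump with %u program headers\n",
		       (unsigned int) ehdr.e_phnum);
	close(fd);
	return 0;
}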
 

+ 5 - 6
arch/s390/Kconfig

@@ -166,8 +166,7 @@ config SCHED_OMIT_FRAME_POINTER
 
 config PGTABLE_LEVELS
 	int
-	default 4 if 64BIT
-	default 2
+	default 4
 
 source "init/Kconfig"
 
@@ -390,9 +389,6 @@ config HOTPLUG_CPU
 	  can be controlled through /sys/devices/system/cpu/cpu#.
 	  Say N if you want to disable CPU hotplug.
 
-config SCHED_SMT
-	def_bool n
-
 # Some NUMA nodes have memory ranges that span
 # other nodes.	Even though a pfn is valid and
 # between a node's start and end pfns, it may not
@@ -403,7 +399,7 @@ config NODES_SPAN_OTHER_NODES
 
 config NUMA
 	bool "NUMA support"
-	depends on SMP && 64BIT && SCHED_TOPOLOGY
+	depends on SMP && SCHED_TOPOLOGY
 	default n
 	help
 	  Enable NUMA support
@@ -463,6 +459,9 @@ config EMU_SIZE
 
 endmenu
 
+config SCHED_SMT
+	def_bool n
+
 config SCHED_MC
 	def_bool n
 

+ 9 - 0
arch/s390/Makefile

@@ -106,6 +106,7 @@ drivers-y	+= drivers/s390/
 drivers-$(CONFIG_OPROFILE)	+= arch/s390/oprofile/
 
 boot		:= arch/s390/boot
+tools		:= arch/s390/tools
 
 all: image bzImage
 
@@ -124,9 +125,17 @@ vdso_install:
 
 archclean:
 	$(Q)$(MAKE) $(clean)=$(boot)
+	$(Q)$(MAKE) $(clean)=$(tools)
+
+archprepare:
+	$(Q)$(MAKE) $(build)=$(tools) include/generated/facilities.h
 
 # Don't use tabs in echo arguments
 define archhelp
   echo  '* image           - Kernel image for IPL ($(boot)/image)'
   echo	'* bzImage         - Compressed kernel image for IPL ($(boot)/bzImage)'
+  echo	'  install         - Install kernel using'
+  echo	'                    (your) ~/bin/$(INSTALLKERNEL) or'
+  echo	'                    (distribution) /sbin/$(INSTALLKERNEL) or'
+  echo	'                    install to $$(INSTALL_PATH)'
 endef

+ 12 - 15
arch/s390/configs/default_defconfig

@@ -10,28 +10,35 @@ CONFIG_TASKSTATS=y
 CONFIG_TASK_DELAY_ACCT=y
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_MEMCG_KMEM=y
+CONFIG_CGROUP_HUGETLB=y
 CONFIG_CGROUP_PERF=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_BLK_CGROUP=y
 CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 CONFIG_BPF_SYSCALL=y
+CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
+CONFIG_STATIC_KEYS_SELFTEST=y
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
@@ -64,7 +71,6 @@ CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_CHSC_SCH=y
 CONFIG_CRASH_DUMP=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
 CONFIG_NET=y
@@ -106,7 +112,6 @@ CONFIG_TCP_CONG_LP=m
 CONFIG_TCP_CONG_VENO=m
 CONFIG_TCP_CONG_YEAH=m
 CONFIG_TCP_CONG_ILLINOIS=m
-CONFIG_IPV6=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
@@ -457,19 +462,9 @@ CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
-# CONFIG_IOMMU_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
-CONFIG_JBD_DEBUG=y
 CONFIG_JBD2_DEBUG=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
@@ -490,7 +485,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
 CONFIG_AUTOFS4_FS=m
-CONFIG_FUSE_FS=m
+CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
@@ -542,10 +537,11 @@ CONFIG_DLM=m
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
-# CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_FRAME_WARN=1024
 CONFIG_READABLE_ASM=y
 CONFIG_UNUSED_SYMBOLS=y
+CONFIG_HEADERS_CHECK=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_DEBUG_OBJECTS=y
@@ -588,6 +584,7 @@ CONFIG_FAILSLAB=y
 CONFIG_FAIL_PAGE_ALLOC=y
 CONFIG_FAIL_MAKE_REQUEST=y
 CONFIG_FAIL_IO_TIMEOUT=y
+CONFIG_FAIL_FUTEX=y
 CONFIG_FAULT_INJECTION_DEBUG_FS=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LATENCYTOP=y

+ 10 - 14
arch/s390/configs/gcov_defconfig

@@ -10,21 +10,27 @@ CONFIG_TASKSTATS=y
 CONFIG_TASK_DELAY_ACCT=y
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_MEMCG_KMEM=y
+CONFIG_CGROUP_HUGETLB=y
 CONFIG_CGROUP_PERF=y
 CONFIG_BLK_CGROUP=y
 CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 CONFIG_BPF_SYSCALL=y
+CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
@@ -61,7 +67,6 @@ CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_CHSC_SCH=y
 CONFIG_CRASH_DUMP=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
 CONFIG_NET=y
@@ -103,7 +108,6 @@ CONFIG_TCP_CONG_LP=m
 CONFIG_TCP_CONG_VENO=m
 CONFIG_TCP_CONG_YEAH=m
 CONFIG_TCP_CONG_ILLINOIS=m
-CONFIG_IPV6=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
@@ -453,19 +457,9 @@ CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
-# CONFIG_IOMMU_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
-CONFIG_JBD_DEBUG=y
 CONFIG_JBD2_DEBUG=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
@@ -485,7 +479,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
 CONFIG_AUTOFS4_FS=m
-CONFIG_FUSE_FS=m
+CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
@@ -550,6 +544,7 @@ CONFIG_NOTIFIER_ERROR_INJECTION=m
 CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
 CONFIG_PM_NOTIFIER_ERROR_INJECT=m
 CONFIG_LATENCYTOP=y
+CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_BLK_DEV_IO_TRACE=y
 # CONFIG_KPROBE_EVENT is not set
 CONFIG_LKDTM=m
@@ -557,6 +552,7 @@ CONFIG_RBTREE_TEST=m
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_TEST_BPF=m
 # CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m

+ 10 - 14
arch/s390/configs/performance_defconfig

@@ -10,22 +10,28 @@ CONFIG_TASKSTATS=y
 CONFIG_TASK_DELAY_ACCT=y
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
 # CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_MEMCG_KMEM=y
+CONFIG_CGROUP_HUGETLB=y
 CONFIG_CGROUP_PERF=y
 CONFIG_BLK_CGROUP=y
 CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 CONFIG_BPF_SYSCALL=y
+CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
@@ -61,7 +67,6 @@ CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_CHSC_SCH=y
 CONFIG_CRASH_DUMP=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
 CONFIG_NET=y
@@ -103,7 +108,6 @@ CONFIG_TCP_CONG_LP=m
 CONFIG_TCP_CONG_VENO=m
 CONFIG_TCP_CONG_YEAH=m
 CONFIG_TCP_CONG_ILLINOIS=m
-CONFIG_IPV6=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
@@ -453,19 +457,9 @@ CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
-# CONFIG_IOMMU_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
-CONFIG_JBD_DEBUG=y
 CONFIG_JBD2_DEBUG=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
@@ -485,7 +479,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
 CONFIG_AUTOFS4_FS=m
-CONFIG_FUSE_FS=m
+CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
@@ -546,6 +540,7 @@ CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
+CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
@@ -554,6 +549,7 @@ CONFIG_UPROBE_EVENT=y
 CONFIG_LKDTM=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_TEST_BPF=m
 # CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m

+ 2 - 8
arch/s390/configs/zfcpdump_defconfig

@@ -23,8 +23,6 @@ CONFIG_CRASH_DUMP=y
 # CONFIG_SECCOMP is not set
 CONFIG_NET=y
 # CONFIG_IUCV is not set
-CONFIG_ATM=y
-CONFIG_ATM_LANE=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -54,14 +52,10 @@ CONFIG_RAW_DRIVER=y
 # CONFIG_S390_VMUR is not set
 # CONFIG_HID is not set
 # CONFIG_IOMMU_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
 # CONFIG_INOTIFY_USER is not set
 CONFIG_CONFIGFS_FS=y
+# CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y

+ 27 - 3
arch/s390/defconfig

@@ -11,22 +11,31 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_CGROUPS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
+CONFIG_MEMCG_KMEM=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_PERF=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_BLK_CGROUP=y
 CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 CONFIG_BPF_SYSCALL=y
+CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
+CONFIG_STATIC_KEYS_SELFTEST=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
@@ -37,6 +46,7 @@ CONFIG_DEFAULT_DEADLINE=y
 CONFIG_LIVEPATCH=y
 CONFIG_MARCH_Z196=y
 CONFIG_NR_CPUS=256
+CONFIG_NUMA=y
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
@@ -52,7 +62,6 @@ CONFIG_NET_KEY=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 # CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
 CONFIG_L2TP=m
 CONFIG_L2TP_DEBUGFS=m
 CONFIG_VLAN_8021Q=y
@@ -89,10 +98,26 @@ CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SCAN_ASYNC=y
 CONFIG_SCSI_FC_ATTRS=y
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=y
+CONFIG_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=m
+CONFIG_DM_SWITCH=m
 CONFIG_NETDEVICES=y
 CONFIG_BONDING=m
 CONFIG_DUMMY=m
@@ -137,7 +162,6 @@ CONFIG_DEBUG_PI_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
-# CONFIG_RCU_CPU_STALL_INFO is not set
 CONFIG_RCU_TRACE=y
 CONFIG_LATENCYTOP=y
 CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y

+ 1 - 1
arch/s390/include/asm/compat.h

@@ -284,7 +284,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 
 static inline int is_compat_task(void)
 {
-	return is_32bit_task();
+	return test_thread_flag(TIF_31BIT);
 }
 
 static inline void __user *arch_compat_alloc_user_space(long len)

+ 0 - 14
arch/s390/include/asm/crw.h

@@ -52,18 +52,4 @@ void crw_wait_for_channel_report(void);
 #define CRW_ERC_PERRI	 0x07 /* perm. error, facility init */
 #define CRW_ERC_PMOD	 0x08 /* installed parameters modified */
 
-static inline int stcrw(struct crw *pcrw)
-{
-	int ccode;
-
-	asm volatile(
-		"	stcrw	0(%2)\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (ccode), "=m" (*pcrw)
-		: "a" (pcrw)
-		: "cc" );
-	return ccode;
-}
-
 #endif /* _ASM_S390_CRW_H */

+ 5 - 6
arch/s390/include/asm/elf.h

@@ -129,6 +129,7 @@ typedef s390_regs elf_gregset_t;
 typedef s390_fp_regs compat_elf_fpregset_t;
 typedef s390_compat_regs compat_elf_gregset_t;
 
+#include <linux/compat.h>
 #include <linux/sched.h>	/* for task_struct */
 #include <asm/mmu_context.h>
 
@@ -162,7 +163,7 @@ extern unsigned int vdso_enabled;
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk. 64-bit
    tasks are aligned to 4GB. */
-#define ELF_ET_DYN_BASE (is_32bit_task() ? \
+#define ELF_ET_DYN_BASE (is_compat_task() ? \
 				(STACK_TOP / 3 * 2) : \
 				(STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
 
@@ -219,9 +220,9 @@ do {								\
  * of up to 1GB. For 31-bit processes the virtual address space is limited,
  * use no alignment and limit the randomization to 8MB.
  */
-#define BRK_RND_MASK	(is_32bit_task() ? 0x7ffUL : 0x3ffffUL)
-#define MMAP_RND_MASK	(is_32bit_task() ? 0x7ffUL : 0x3ff80UL)
-#define MMAP_ALIGN_MASK	(is_32bit_task() ? 0 : 0x7fUL)
+#define BRK_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x3ffffUL)
+#define MMAP_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x3ff80UL)
+#define MMAP_ALIGN_MASK	(is_compat_task() ? 0 : 0x7fUL)
 #define STACK_RND_MASK	MMAP_RND_MASK
 
 #define ARCH_DLINFO							    \
@@ -236,6 +237,4 @@ struct linux_binprm;
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 int arch_setup_additional_pages(struct linux_binprm *, int);
 
-void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
-
 #endif
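
As a quick check of the randomization comment above — a sketch assuming the
generic pattern rnd = get_random_long() & MASK followed by a shift by
PAGE_SHIFT with 4K pages; the arch_mmap_rnd()/brk code itself is not part of
this excerpt:

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12; /* 4K pages */

	/* 64-bit tasks: roughly 1 GB of randomization */
	printf("64-bit brk span : %#lx\n", 0x3ffffUL << page_shift);
	printf("64-bit mmap span: %#lx\n", 0x3ff80UL << page_shift);
	/* 31-bit tasks: roughly 8 MB of randomization */
	printf("31-bit span     : %#lx\n", 0x7ffUL << page_shift);
	return 0;
}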

+ 58 - 0
arch/s390/include/asm/facilities_src.h

@@ -0,0 +1,58 @@
+/*
+ *    Copyright IBM Corp. 2015
+ */
+
+#ifndef S390_GEN_FACILITIES_C
+#error "This file can only be included by gen_facilities.c"
+#endif
+
+#include <linux/kconfig.h>
+
+struct facility_def {
+	char *name;
+	int *bits;
+};
+
+static struct facility_def facility_defs[] = {
+	{
+		/*
+		 * FACILITIES_ALS contains the list of facilities that are
+		 * required to run a kernel that is compiled e.g. with
+		 * -march=<machine>.
+		 */
+		.name = "FACILITIES_ALS",
+		.bits = (int[]){
+#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
+			0,  /* N3 instructions */
+			1,  /* z/Arch mode installed */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
+			18, /* long displacement facility */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+			7,  /* stfle */
+			17, /* message security assist */
+			21, /* extended-immediate facility */
+			25, /* store clock fast */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+			27, /* mvcos */
+			32, /* compare and swap and store */
+			33, /* compare and swap and store 2 */
+			34, /* general extension facility */
+			35, /* execute extensions */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+			45, /* fast-BCR, etc. */
+#endif
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+			49, /* misc-instruction-extensions */
+			52, /* interlocked facility 2 */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
+			53, /* load-and-zero-rightmost-byte, etc. */
+#endif
+			-1 /* END */
+		}
+	},
+};
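
The generator that consumes these -1 terminated bit lists,
arch/s390/tools/gen_facilities.c, is added by this series but not shown in the
excerpt. The sketch below only illustrates the idea, under the assumption that
facility bits are numbered MSB-first per 64-bit word, matching
__test_facility() in asm/facility.h; the function name and the 256-bit limit
are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical sketch, not the real gen_facilities.c: fold a -1 terminated
 * list of facility bit numbers into 64-bit words and print them as a macro
 * usable in an array initializer such as FACILITIES_ALS. */
static void print_facility_mask(const char *name, const int *bits)
{
	uint64_t words[4] = { 0 };
	int i;

	for (i = 0; bits[i] != -1; i++) {
		if (bits[i] < 0 || bits[i] >= 256)
			continue; /* keep the sketch simple */
		words[bits[i] / 64] |= 1ULL << (63 - (bits[i] % 64));
	}
	printf("#define %s ", name);
	for (i = 0; i < 4; i++)
		printf("0x%016llxULL%s", (unsigned long long) words[i],
		       i < 3 ? "," : "\n");
}

In the actual build the generated output lands in
include/generated/facilities.h (see the archprepare rule added to
arch/s390/Makefile above) and is pulled in by asm/facility.h below.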

+ 13 - 4
arch/s390/include/asm/facility.h

@@ -7,6 +7,10 @@
 #ifndef __ASM_FACILITY_H
 #define __ASM_FACILITY_H
 
+#include <generated/facilities.h>
+
+#ifndef __ASSEMBLY__
+
 #include <linux/string.h>
 #include <linux/preempt.h>
 #include <asm/lowcore.h>
@@ -30,6 +34,12 @@ static inline int __test_facility(unsigned long nr, void *facilities)
  */
 static inline int test_facility(unsigned long nr)
 {
+	unsigned long facilities_als[] = { FACILITIES_ALS };
+
+	if (__builtin_constant_p(nr) && nr < sizeof(facilities_als) * 8) {
+		if (__test_facility(nr, &facilities_als))
+			return 1;
+	}
 	return __test_facility(nr, &S390_lowcore.stfle_fac_list);
 }
 
@@ -44,10 +54,8 @@ static inline void stfle(u64 *stfle_fac_list, int size)
 
 	preempt_disable();
 	asm volatile(
-		"	.insn s,0xb2b10000,0(0)\n" /* stfl */
-		"0:\n"
-		EX_TABLE(0b, 0b)
-		: "+m" (S390_lowcore.stfl_fac_list));
+		"	stfl	0(0)\n"
+		: "=m" (S390_lowcore.stfl_fac_list));
 	nr = 4; /* bytes stored by stfl */
 	memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
 	if (S390_lowcore.stfl_fac_list & 0x01000000) {
@@ -64,4 +72,5 @@ static inline void stfle(u64 *stfle_fac_list, int size)
 	preempt_enable();
 }
 
+#endif /* __ASSEMBLY__ */
 #endif /* __ASM_FACILITY_H */
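
A short usage sketch of the fast path this enables (the caller below is
hypothetical, not part of the diff): for a compile-time constant bit that is
part of the generated architecture level set, test_facility() folds to a
constant and the stfle list in the lowcore is never consulted.

#include <linux/types.h>
#include <asm/facility.h>

/* Hypothetical helper: bit 18 (long-displacement facility) is in
 * FACILITIES_ALS for kernels built for z990 or newer, so this check is
 * resolved at compile time; bits outside the level set still go through
 * the run-time lookup in S390_lowcore.stfle_fac_list. */
static inline bool have_long_displacement(void)
{
	return test_facility(18);
}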

+ 1 - 9
arch/s390/include/asm/fpu/internal.h

@@ -12,21 +12,13 @@
 #include <asm/ctl_reg.h>
 #include <asm/fpu/types.h>
 
-static inline void save_vx_regs_safe(__vector128 *vxrs)
+static inline void save_vx_regs(__vector128 *vxrs)
 {
-	unsigned long cr0, flags;
-
-	flags = arch_local_irq_save();
-	__ctl_store(cr0, 0, 0);
-	__ctl_set_bit(0, 17);
-	__ctl_set_bit(0, 18);
 	asm volatile(
 		"	la	1,%0\n"
 		"	.word	0xe70f,0x1000,0x003e\n"	/* vstm 0,15,0(1) */
 		"	.word	0xe70f,0x1100,0x0c3e\n"	/* vstm 16,31,256(1) */
 		: "=Q" (*(struct vx_array *) vxrs) : : "1");
-	__ctl_load(cr0, 0, 0);
-	arch_local_irq_restore(flags);
 }
 
 static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs)

+ 6 - 8
arch/s390/include/asm/ipl.h

@@ -87,14 +87,12 @@ struct ipl_parameter_block {
  * IPL validity flags
  */
 extern u32 ipl_flags;
-extern u32 dump_prefix_page;
 
-struct dump_save_areas {
-	struct save_area_ext **areas;
-	int count;
-};
-
-extern struct dump_save_areas dump_save_areas;
+struct save_area;
+struct save_area * __init save_area_alloc(bool is_boot_cpu);
+struct save_area * __init save_area_boot_cpu(void);
+void __init save_area_add_regs(struct save_area *, void *regs);
+void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs);
 
 extern void do_reipl(void);
 extern void do_halt(void);
@@ -176,7 +174,7 @@ enum diag308_rc {
 
 extern int diag308(unsigned long subcode, void *addr);
 extern void diag308_reset(void);
-extern void store_status(void);
+extern void store_status(void (*fn)(void *), void *data);
 extern void lgr_info_log(void);
 
 #endif /* _ASM_S390_IPL_H */

+ 3 - 24
arch/s390/include/asm/lowcore.h

@@ -16,28 +16,7 @@
 #define LC_ORDER 1
 #define LC_PAGES 2
 
-struct save_area {
-	u64	fp_regs[16];
-	u64	gp_regs[16];
-	u8	psw[16];
-	u8	pad1[8];
-	u32	pref_reg;
-	u32	fp_ctrl_reg;
-	u8	pad2[4];
-	u32	tod_reg;
-	u64	timer;
-	u64	clk_cmp;
-	u8	pad3[8];
-	u32	acc_regs[16];
-	u64	ctrl_regs[16];
-} __packed;
-
-struct save_area_ext {
-	struct save_area	sa;
-	__vector128		vx_regs[32];
-};
-
-struct _lowcore {
+struct lowcore {
 	__u8	pad_0x0000[0x0014-0x0000];	/* 0x0000 */
 	__u32	ipl_parmblock_ptr;		/* 0x0014 */
 	__u8	pad_0x0018[0x0080-0x0018];	/* 0x0018 */
@@ -204,9 +183,9 @@ struct _lowcore {
 	__u8	vector_save_area[1024];		/* 0x1c00 */
 } __packed;
 
-#define S390_lowcore (*((struct _lowcore *) 0))
+#define S390_lowcore (*((struct lowcore *) 0))
 
-extern struct _lowcore *lowcore_ptr[];
+extern struct lowcore *lowcore_ptr[];
 
 static inline void set_prefix(__u32 address)
 {

+ 1 - 1
arch/s390/include/asm/os_info.h

@@ -38,7 +38,7 @@ u32 os_info_csum(struct os_info *os_info);
 
 #ifdef CONFIG_CRASH_DUMP
 void *os_info_old_entry(int nr, unsigned long *size);
-int copy_from_oldmem(void *dest, void *src, size_t count);
+int copy_oldmem_kernel(void *dst, void *src, size_t count);
 #else
 static inline void *os_info_old_entry(int nr, unsigned long *size)
 {

+ 2 - 0
arch/s390/include/asm/pci_dma.h

@@ -23,6 +23,8 @@ enum zpci_ioat_dtype {
 #define ZPCI_IOTA_FS_2G			2
 #define ZPCI_KEY			(PAGE_DEFAULT_KEY << 5)
 
+#define ZPCI_TABLE_SIZE_RT	(1UL << 42)
+
 #define ZPCI_IOTA_STO_FLAG	(ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST)
 #define ZPCI_IOTA_RTTO_FLAG	(ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT)
 #define ZPCI_IOTA_RSTO_FLAG	(ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RS)

+ 12 - 0
arch/s390/include/asm/processor.h

@@ -18,12 +18,14 @@
 #define CIF_NOHZ_DELAY		2	/* delay HZ disable for a tick */
 #define CIF_FPU			3	/* restore FPU registers */
 #define CIF_IGNORE_IRQ		4	/* ignore interrupt (for udelay) */
+#define CIF_ENABLED_WAIT	5	/* in enabled wait state */
 
 #define _CIF_MCCK_PENDING	_BITUL(CIF_MCCK_PENDING)
 #define _CIF_ASCE		_BITUL(CIF_ASCE)
 #define _CIF_NOHZ_DELAY		_BITUL(CIF_NOHZ_DELAY)
 #define _CIF_FPU		_BITUL(CIF_FPU)
 #define _CIF_IGNORE_IRQ		_BITUL(CIF_IGNORE_IRQ)
+#define _CIF_ENABLED_WAIT	_BITUL(CIF_ENABLED_WAIT)
 
 #ifndef __ASSEMBLY__
 
@@ -52,6 +54,16 @@ static inline int test_cpu_flag(int flag)
 	return !!(S390_lowcore.cpu_flags & (1UL << flag));
 }
 
+/*
+ * Test CIF flag of another CPU. The caller needs to ensure that
+ * CPU hotplug can not happen, e.g. by disabling preemption.
+ */
+static inline int test_cpu_flag_of(int flag, int cpu)
+{
+	struct lowcore *lc = lowcore_ptr[cpu];
+	return !!(lc->cpu_flags & (1UL << flag));
+}
+
 #define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
 
 /*
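
A minimal sketch of the new helper (the wrapper below is hypothetical): as the
comment above requires, the caller pins the remote CPU, e.g. by disabling
preemption, before dereferencing its lowcore. The reordered spinlock loop
mentioned in the merge description relies on this kind of check to decide
whether a hypervisor yield is worthwhile, but arch/s390/lib/spinlock.c itself
is not part of this excerpt.

#include <linux/preempt.h>
#include <linux/types.h>
#include <asm/processor.h>

/* Hypothetical helper: report whether another CPU currently sits in
 * enabled wait (idle). Preemption is disabled so the CPU cannot go away
 * while its lowcore is dereferenced. */
static bool cpu_in_enabled_wait(int cpu)
{
	bool idle;

	preempt_disable();
	idle = test_cpu_flag_of(CIF_ENABLED_WAIT, cpu);
	preempt_enable();
	return idle;
}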

+ 19 - 19
arch/s390/include/asm/ptrace.h

@@ -24,25 +24,25 @@
 			 PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
 
 struct psw_bits {
-	unsigned long long	: 1;
-	unsigned long long r	: 1; /* PER-Mask */
-	unsigned long long	: 3;
-	unsigned long long t	: 1; /* DAT Mode */
-	unsigned long long i	: 1; /* Input/Output Mask */
-	unsigned long long e	: 1; /* External Mask */
-	unsigned long long key	: 4; /* PSW Key */
-	unsigned long long	: 1;
-	unsigned long long m	: 1; /* Machine-Check Mask */
-	unsigned long long w	: 1; /* Wait State */
-	unsigned long long p	: 1; /* Problem State */
-	unsigned long long as	: 2; /* Address Space Control */
-	unsigned long long cc	: 2; /* Condition Code */
-	unsigned long long pm	: 4; /* Program Mask */
-	unsigned long long ri	: 1; /* Runtime Instrumentation */
-	unsigned long long	: 6;
-	unsigned long long eaba : 2; /* Addressing Mode */
-	unsigned long long	: 31;
-	unsigned long long ia	: 64;/* Instruction Address */
+	unsigned long	   :  1;
+	unsigned long r	   :  1; /* PER-Mask */
+	unsigned long	   :  3;
+	unsigned long t	   :  1; /* DAT Mode */
+	unsigned long i	   :  1; /* Input/Output Mask */
+	unsigned long e	   :  1; /* External Mask */
+	unsigned long key  :  4; /* PSW Key */
+	unsigned long	   :  1;
+	unsigned long m	   :  1; /* Machine-Check Mask */
+	unsigned long w	   :  1; /* Wait State */
+	unsigned long p	   :  1; /* Problem State */
+	unsigned long as   :  2; /* Address Space Control */
+	unsigned long cc   :  2; /* Condition Code */
+	unsigned long pm   :  4; /* Program Mask */
+	unsigned long ri   :  1; /* Runtime Instrumentation */
+	unsigned long	   :  6;
+	unsigned long eaba :  2; /* Addressing Mode */
+	unsigned long	   : 31;
+	unsigned long ia   : 64; /* Instruction Address */
 };
 
 enum {
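
A hedged illustration of how the bit-field view is typically consumed; the
psw_bits() accessor assumed here overlays struct psw_bits on a psw_t and is
defined elsewhere in asm/ptrace.h, outside this excerpt:

#include <linux/types.h>
#include <asm/ptrace.h>

/* Hypothetical helper using the field names from struct psw_bits above:
 * "p" is the problem-state bit, "t" the DAT-mode bit. */
static bool psw_user_mode_with_dat(struct pt_regs *regs)
{
	return psw_bits(regs->psw).p && psw_bits(regs->psw).t;
}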

+ 1 - 2
arch/s390/include/asm/reset.h

@@ -15,6 +15,5 @@ struct reset_call {
 
 extern void register_reset_call(struct reset_call *reset);
 extern void unregister_reset_call(struct reset_call *reset);
-extern void s390_reset_system(void (*fn_pre)(void),
-			      void (*fn_post)(void *), void *data);
+extern void s390_reset_system(void);
 #endif /* _ASM_S390_RESET_H */

+ 7 - 6
arch/s390/include/asm/sclp.h

@@ -63,12 +63,12 @@ struct sclp_info {
 	unsigned int mtid;
 	unsigned int mtid_cp;
 	unsigned int mtid_prev;
-	unsigned long long rzm;
-	unsigned long long rnmax;
-	unsigned long long hamax;
+	unsigned long rzm;
+	unsigned long rnmax;
+	unsigned long hamax;
 	unsigned int max_cores;
 	unsigned long hsa_size;
-	unsigned long long facilities;
+	unsigned long facilities;
 };
 extern struct sclp_info sclp;
 
@@ -83,8 +83,9 @@ int sclp_chp_read_info(struct sclp_chp_info *info);
 void sclp_get_ipl_info(struct sclp_ipl_info *info);
 int sclp_pci_configure(u32 fid);
 int sclp_pci_deconfigure(u32 fid);
-int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
+int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count);
+int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count);
 void sclp_early_detect(void);
-int _sclp_print_early(const char *);
+void _sclp_print_early(const char *);
 
 #endif /* _ASM_S390_SCLP_H */

+ 14 - 17
arch/s390/include/asm/setup.h

@@ -12,27 +12,24 @@
 #define PARMAREA		0x10400
 
 /*
- * Machine features detected in head.S
+ * Machine features detected in early.c
  */
 
 #define MACHINE_FLAG_VM		_BITUL(0)
-#define MACHINE_FLAG_IEEE	_BITUL(1)
-#define MACHINE_FLAG_CSP	_BITUL(2)
-#define MACHINE_FLAG_MVPG	_BITUL(3)
-#define MACHINE_FLAG_DIAG44	_BITUL(4)
+#define MACHINE_FLAG_KVM	_BITUL(1)
+#define MACHINE_FLAG_LPAR	_BITUL(2)
+#define MACHINE_FLAG_DIAG9C	_BITUL(3)
+#define MACHINE_FLAG_ESOP	_BITUL(4)
 #define MACHINE_FLAG_IDTE	_BITUL(5)
-#define MACHINE_FLAG_DIAG9C	_BITUL(6)
-#define MACHINE_FLAG_KVM	_BITUL(8)
-#define MACHINE_FLAG_ESOP	_BITUL(9)
-#define MACHINE_FLAG_EDAT1	_BITUL(10)
-#define MACHINE_FLAG_EDAT2	_BITUL(11)
-#define MACHINE_FLAG_LPAR	_BITUL(12)
-#define MACHINE_FLAG_LPP	_BITUL(13)
-#define MACHINE_FLAG_TOPOLOGY	_BITUL(14)
-#define MACHINE_FLAG_TE		_BITUL(15)
-#define MACHINE_FLAG_TLB_LC	_BITUL(17)
-#define MACHINE_FLAG_VX		_BITUL(18)
-#define MACHINE_FLAG_CAD	_BITUL(19)
+#define MACHINE_FLAG_DIAG44	_BITUL(6)
+#define MACHINE_FLAG_EDAT1	_BITUL(7)
+#define MACHINE_FLAG_EDAT2	_BITUL(8)
+#define MACHINE_FLAG_LPP	_BITUL(9)
+#define MACHINE_FLAG_TOPOLOGY	_BITUL(10)
+#define MACHINE_FLAG_TE		_BITUL(11)
+#define MACHINE_FLAG_TLB_LC	_BITUL(12)
+#define MACHINE_FLAG_VX		_BITUL(13)
+#define MACHINE_FLAG_CAD	_BITUL(14)
 
 #define LPP_MAGIC		_BITUL(31)
 #define LPP_PFAULT_PID_MASK	_AC(0xffffffff, UL)

+ 1 - 1
arch/s390/include/asm/smp.h

@@ -18,6 +18,7 @@
 extern struct mutex smp_cpu_state_mutex;
 extern unsigned int smp_cpu_mt_shift;
 extern unsigned int smp_cpu_mtid;
+extern __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
 
 extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
 
@@ -55,7 +56,6 @@ static inline int smp_store_status(int cpu) { return 0; }
 static inline int smp_vcpu_scheduled(int cpu) { return 1; }
 static inline void smp_yield_cpu(int cpu) { }
 static inline void smp_fill_possible_mask(void) { }
-static inline void smp_save_dump_cpus(void) { }
 
 #endif /* CONFIG_SMP */
 

+ 13 - 4
arch/s390/include/asm/sysinfo.h

@@ -56,7 +56,12 @@ struct sysinfo_1_2_2 {
 	char format;
 	char reserved_0[1];
 	unsigned short acc_offset;
-	char reserved_1[20];
+	unsigned char mt_installed :1;
+	unsigned char :2;
+	unsigned char mt_stid :5;
+	unsigned char :3;
+	unsigned char mt_gtid :5;
+	char reserved_1[18];
 	unsigned int nominal_cap;
 	unsigned int secondary_cap;
 	unsigned int capability;
@@ -92,9 +97,13 @@ struct sysinfo_2_2_2 {
 	char name[8];
 	unsigned int caf;
 	char reserved_2[8];
-	unsigned char mt_installed;
-	unsigned char mt_general;
-	unsigned char mt_psmtid;
+	unsigned char mt_installed :1;
+	unsigned char :2;
+	unsigned char mt_stid :5;
+	unsigned char :3;
+	unsigned char mt_gtid :5;
+	unsigned char :3;
+	unsigned char mt_psmtid :5;
 	char reserved_3[5];
 	unsigned short cpus_dedicated;
 	unsigned short cpus_shared;
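
A brief sketch of how the new multithreading bit-fields would be read from
SYSIB 2.2.2 (the caller is hypothetical; stsi() is the existing SYSIB query
helper declared elsewhere in asm/sysinfo.h, and the SYSIB buffer must be a
full 4K page):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/printk.h>
#include <asm/sysinfo.h>

/* Hypothetical query: fetch SYSIB 2.2.2 and report the MT fields that this
 * patch narrows into 1-, 3- and 5-bit wide bit-fields. */
static int report_mt_info(void)
{
	struct sysinfo_2_2_2 *info;

	info = (void *) get_zeroed_page(GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	if (stsi(info, 2, 2, 2) == 0 && info->mt_installed)
		pr_info("MT installed, stid %d, gtid %d\n",
			info->mt_stid, info->mt_gtid);
	free_page((unsigned long) info);
	return 0;
}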

+ 0 - 2
arch/s390/include/asm/thread_info.h

@@ -96,6 +96,4 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define _TIF_31BIT		_BITUL(TIF_31BIT)
 #define _TIF_SINGLE_STEP	_BITUL(TIF_SINGLE_STEP)
 
-#define is_32bit_task()		(test_thread_flag(TIF_31BIT))
-
 #endif /* _ASM_THREAD_INFO_H */

+ 3 - 3
arch/s390/include/asm/topology.h

@@ -7,7 +7,7 @@
 struct sysinfo_15_1_x;
 struct cpu;
 
-#ifdef CONFIG_SCHED_BOOK
+#ifdef CONFIG_SCHED_TOPOLOGY
 
 struct cpu_topology_s390 {
 	unsigned short thread_id;
@@ -40,13 +40,13 @@ void store_topology(struct sysinfo_15_1_x *info);
 void topology_expect_change(void);
 const struct cpumask *cpu_coregroup_mask(int cpu);
 
-#else /* CONFIG_SCHED_BOOK */
+#else /* CONFIG_SCHED_TOPOLOGY */
 
 static inline void topology_schedule_update(void) { }
 static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
 static inline void topology_expect_change(void) { }
 
-#endif /* CONFIG_SCHED_BOOK */
+#endif /* CONFIG_SCHED_TOPOLOGY */
 
 #define POLARIZATION_UNKNOWN	(-1)
 #define POLARIZATION_HRZ	(0)

+ 4 - 2
arch/s390/include/asm/vdso.h

@@ -38,12 +38,14 @@ struct vdso_data {
 struct vdso_per_cpu_data {
 	__u64 ectg_timer_base;
 	__u64 ectg_user_time;
+	__u32 cpu_nr;
+	__u32 node_id;
 };
 
 extern struct vdso_data *vdso_data;
 
-int vdso_alloc_per_cpu(struct _lowcore *lowcore);
-void vdso_free_per_cpu(struct _lowcore *lowcore);
+int vdso_alloc_per_cpu(struct lowcore *lowcore);
+void vdso_free_per_cpu(struct lowcore *lowcore);
 
 #endif /* __ASSEMBLY__ */
 #endif /* __S390_VDSO_H__ */
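
The new cpu_nr/node_id fields back the vdso getcpu code added in
arch/s390/kernel/vdso32/getcpu.S and vdso64/getcpu.S (see the file list).
From user space this is normally reached through sched_getcpu(); a minimal
illustration, assuming a glibc that routes the call through the vdso where
available:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* With the vdso getcpu entry point in place, this can be answered
	 * from the per-CPU vdso data without entering the kernel. */
	int cpu = sched_getcpu();

	if (cpu >= 0)
		printf("running on cpu %d\n", cpu);
	return 0;
}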

+ 5 - 3
arch/s390/kernel/Makefile

@@ -34,8 +34,10 @@ CFLAGS_sysinfo.o += -w
 #
 CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
 ifneq ($(CC_FLAGS_MARCH),-march=z900)
-CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
-CFLAGS_sclp.o	+= -march=z900
+CFLAGS_REMOVE_sclp.o	+= $(CC_FLAGS_MARCH)
+CFLAGS_sclp.o		+= -march=z900
+AFLAGS_REMOVE_head.o	+= $(CC_FLAGS_MARCH)
+AFLAGS_head.o		+= -march=z900
 endif
 GCOV_PROFILE_sclp.o := n
 
@@ -50,7 +52,7 @@ extra-y				+= head.o head64.o vmlinux.lds
 
 obj-$(CONFIG_MODULES)		+= s390_ksyms.o module.o
 obj-$(CONFIG_SMP)		+= smp.o
-obj-$(CONFIG_SCHED_BOOK)	+= topology.o
+obj-$(CONFIG_SCHED_TOPOLOGY)	+= topology.o
 obj-$(CONFIG_HIBERNATION)	+= suspend.o swsusp.o
 obj-$(CONFIG_AUDIT)		+= audit.o
 compat-obj-$(CONFIG_AUDIT)	+= compat_audit.o

+ 89 - 86
arch/s390/kernel/asm-offsets.c

@@ -80,6 +80,8 @@ int main(void)
 	OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
 	OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
 	OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
+	OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr);
+	OFFSET(__VDSO_NODE_ID, vdso_per_cpu_data, node_id);
 	BLANK();
 	/* constants used by the vdso */
 	DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
@@ -97,95 +99,96 @@ int main(void)
 	OFFSET(__TIMER_IDLE_EXIT, s390_idle_data, timer_idle_exit);
 	BLANK();
 	/* hardware defined lowcore locations 0x000 - 0x1ff */
-	OFFSET(__LC_EXT_PARAMS, _lowcore, ext_params);
-	OFFSET(__LC_EXT_CPU_ADDR, _lowcore, ext_cpu_addr);
-	OFFSET(__LC_EXT_INT_CODE, _lowcore, ext_int_code);
-	OFFSET(__LC_SVC_ILC, _lowcore, svc_ilc);
-	OFFSET(__LC_SVC_INT_CODE, _lowcore, svc_code);
-	OFFSET(__LC_PGM_ILC, _lowcore, pgm_ilc);
-	OFFSET(__LC_PGM_INT_CODE, _lowcore, pgm_code);
-	OFFSET(__LC_DATA_EXC_CODE, _lowcore, data_exc_code);
-	OFFSET(__LC_MON_CLASS_NR, _lowcore, mon_class_num);
-	OFFSET(__LC_PER_CODE, _lowcore, per_code);
-	OFFSET(__LC_PER_ATMID, _lowcore, per_atmid);
-	OFFSET(__LC_PER_ADDRESS, _lowcore, per_address);
-	OFFSET(__LC_EXC_ACCESS_ID, _lowcore, exc_access_id);
-	OFFSET(__LC_PER_ACCESS_ID, _lowcore, per_access_id);
-	OFFSET(__LC_OP_ACCESS_ID, _lowcore, op_access_id);
-	OFFSET(__LC_AR_MODE_ID, _lowcore, ar_mode_id);
-	OFFSET(__LC_TRANS_EXC_CODE, _lowcore, trans_exc_code);
-	OFFSET(__LC_MON_CODE, _lowcore, monitor_code);
-	OFFSET(__LC_SUBCHANNEL_ID, _lowcore, subchannel_id);
-	OFFSET(__LC_SUBCHANNEL_NR, _lowcore, subchannel_nr);
-	OFFSET(__LC_IO_INT_PARM, _lowcore, io_int_parm);
-	OFFSET(__LC_IO_INT_WORD, _lowcore, io_int_word);
-	OFFSET(__LC_STFL_FAC_LIST, _lowcore, stfl_fac_list);
-	OFFSET(__LC_MCCK_CODE, _lowcore, mcck_interruption_code);
-	OFFSET(__LC_MCCK_FAIL_STOR_ADDR, _lowcore, failing_storage_address);
-	OFFSET(__LC_LAST_BREAK, _lowcore, breaking_event_addr);
-	OFFSET(__LC_RST_OLD_PSW, _lowcore, restart_old_psw);
-	OFFSET(__LC_EXT_OLD_PSW, _lowcore, external_old_psw);
-	OFFSET(__LC_SVC_OLD_PSW, _lowcore, svc_old_psw);
-	OFFSET(__LC_PGM_OLD_PSW, _lowcore, program_old_psw);
-	OFFSET(__LC_MCK_OLD_PSW, _lowcore, mcck_old_psw);
-	OFFSET(__LC_IO_OLD_PSW, _lowcore, io_old_psw);
-	OFFSET(__LC_RST_NEW_PSW, _lowcore, restart_psw);
-	OFFSET(__LC_EXT_NEW_PSW, _lowcore, external_new_psw);
-	OFFSET(__LC_SVC_NEW_PSW, _lowcore, svc_new_psw);
-	OFFSET(__LC_PGM_NEW_PSW, _lowcore, program_new_psw);
-	OFFSET(__LC_MCK_NEW_PSW, _lowcore, mcck_new_psw);
-	OFFSET(__LC_IO_NEW_PSW, _lowcore, io_new_psw);
+	OFFSET(__LC_EXT_PARAMS, lowcore, ext_params);
+	OFFSET(__LC_EXT_CPU_ADDR, lowcore, ext_cpu_addr);
+	OFFSET(__LC_EXT_INT_CODE, lowcore, ext_int_code);
+	OFFSET(__LC_SVC_ILC, lowcore, svc_ilc);
+	OFFSET(__LC_SVC_INT_CODE, lowcore, svc_code);
+	OFFSET(__LC_PGM_ILC, lowcore, pgm_ilc);
+	OFFSET(__LC_PGM_INT_CODE, lowcore, pgm_code);
+	OFFSET(__LC_DATA_EXC_CODE, lowcore, data_exc_code);
+	OFFSET(__LC_MON_CLASS_NR, lowcore, mon_class_num);
+	OFFSET(__LC_PER_CODE, lowcore, per_code);
+	OFFSET(__LC_PER_ATMID, lowcore, per_atmid);
+	OFFSET(__LC_PER_ADDRESS, lowcore, per_address);
+	OFFSET(__LC_EXC_ACCESS_ID, lowcore, exc_access_id);
+	OFFSET(__LC_PER_ACCESS_ID, lowcore, per_access_id);
+	OFFSET(__LC_OP_ACCESS_ID, lowcore, op_access_id);
+	OFFSET(__LC_AR_MODE_ID, lowcore, ar_mode_id);
+	OFFSET(__LC_TRANS_EXC_CODE, lowcore, trans_exc_code);
+	OFFSET(__LC_MON_CODE, lowcore, monitor_code);
+	OFFSET(__LC_SUBCHANNEL_ID, lowcore, subchannel_id);
+	OFFSET(__LC_SUBCHANNEL_NR, lowcore, subchannel_nr);
+	OFFSET(__LC_IO_INT_PARM, lowcore, io_int_parm);
+	OFFSET(__LC_IO_INT_WORD, lowcore, io_int_word);
+	OFFSET(__LC_STFL_FAC_LIST, lowcore, stfl_fac_list);
+	OFFSET(__LC_STFLE_FAC_LIST, lowcore, stfle_fac_list);
+	OFFSET(__LC_MCCK_CODE, lowcore, mcck_interruption_code);
+	OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
+	OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr);
+	OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw);
+	OFFSET(__LC_EXT_OLD_PSW, lowcore, external_old_psw);
+	OFFSET(__LC_SVC_OLD_PSW, lowcore, svc_old_psw);
+	OFFSET(__LC_PGM_OLD_PSW, lowcore, program_old_psw);
+	OFFSET(__LC_MCK_OLD_PSW, lowcore, mcck_old_psw);
+	OFFSET(__LC_IO_OLD_PSW, lowcore, io_old_psw);
+	OFFSET(__LC_RST_NEW_PSW, lowcore, restart_psw);
+	OFFSET(__LC_EXT_NEW_PSW, lowcore, external_new_psw);
+	OFFSET(__LC_SVC_NEW_PSW, lowcore, svc_new_psw);
+	OFFSET(__LC_PGM_NEW_PSW, lowcore, program_new_psw);
+	OFFSET(__LC_MCK_NEW_PSW, lowcore, mcck_new_psw);
+	OFFSET(__LC_IO_NEW_PSW, lowcore, io_new_psw);
 	/* software defined lowcore locations 0x200 - 0xdff*/
-	OFFSET(__LC_SAVE_AREA_SYNC, _lowcore, save_area_sync);
-	OFFSET(__LC_SAVE_AREA_ASYNC, _lowcore, save_area_async);
-	OFFSET(__LC_SAVE_AREA_RESTART, _lowcore, save_area_restart);
-	OFFSET(__LC_CPU_FLAGS, _lowcore, cpu_flags);
-	OFFSET(__LC_RETURN_PSW, _lowcore, return_psw);
-	OFFSET(__LC_RETURN_MCCK_PSW, _lowcore, return_mcck_psw);
-	OFFSET(__LC_SYNC_ENTER_TIMER, _lowcore, sync_enter_timer);
-	OFFSET(__LC_ASYNC_ENTER_TIMER, _lowcore, async_enter_timer);
-	OFFSET(__LC_MCCK_ENTER_TIMER, _lowcore, mcck_enter_timer);
-	OFFSET(__LC_EXIT_TIMER, _lowcore, exit_timer);
-	OFFSET(__LC_USER_TIMER, _lowcore, user_timer);
-	OFFSET(__LC_SYSTEM_TIMER, _lowcore, system_timer);
-	OFFSET(__LC_STEAL_TIMER, _lowcore, steal_timer);
-	OFFSET(__LC_LAST_UPDATE_TIMER, _lowcore, last_update_timer);
-	OFFSET(__LC_LAST_UPDATE_CLOCK, _lowcore, last_update_clock);
-	OFFSET(__LC_INT_CLOCK, _lowcore, int_clock);
-	OFFSET(__LC_MCCK_CLOCK, _lowcore, mcck_clock);
-	OFFSET(__LC_CURRENT, _lowcore, current_task);
-	OFFSET(__LC_THREAD_INFO, _lowcore, thread_info);
-	OFFSET(__LC_KERNEL_STACK, _lowcore, kernel_stack);
-	OFFSET(__LC_ASYNC_STACK, _lowcore, async_stack);
-	OFFSET(__LC_PANIC_STACK, _lowcore, panic_stack);
-	OFFSET(__LC_RESTART_STACK, _lowcore, restart_stack);
-	OFFSET(__LC_RESTART_FN, _lowcore, restart_fn);
-	OFFSET(__LC_RESTART_DATA, _lowcore, restart_data);
-	OFFSET(__LC_RESTART_SOURCE, _lowcore, restart_source);
-	OFFSET(__LC_USER_ASCE, _lowcore, user_asce);
-	OFFSET(__LC_LPP, _lowcore, lpp);
-	OFFSET(__LC_CURRENT_PID, _lowcore, current_pid);
-	OFFSET(__LC_PERCPU_OFFSET, _lowcore, percpu_offset);
-	OFFSET(__LC_VDSO_PER_CPU, _lowcore, vdso_per_cpu_data);
-	OFFSET(__LC_MACHINE_FLAGS, _lowcore, machine_flags);
-	OFFSET(__LC_GMAP, _lowcore, gmap);
-	OFFSET(__LC_PASTE, _lowcore, paste);
+	OFFSET(__LC_SAVE_AREA_SYNC, lowcore, save_area_sync);
+	OFFSET(__LC_SAVE_AREA_ASYNC, lowcore, save_area_async);
+	OFFSET(__LC_SAVE_AREA_RESTART, lowcore, save_area_restart);
+	OFFSET(__LC_CPU_FLAGS, lowcore, cpu_flags);
+	OFFSET(__LC_RETURN_PSW, lowcore, return_psw);
+	OFFSET(__LC_RETURN_MCCK_PSW, lowcore, return_mcck_psw);
+	OFFSET(__LC_SYNC_ENTER_TIMER, lowcore, sync_enter_timer);
+	OFFSET(__LC_ASYNC_ENTER_TIMER, lowcore, async_enter_timer);
+	OFFSET(__LC_MCCK_ENTER_TIMER, lowcore, mcck_enter_timer);
+	OFFSET(__LC_EXIT_TIMER, lowcore, exit_timer);
+	OFFSET(__LC_USER_TIMER, lowcore, user_timer);
+	OFFSET(__LC_SYSTEM_TIMER, lowcore, system_timer);
+	OFFSET(__LC_STEAL_TIMER, lowcore, steal_timer);
+	OFFSET(__LC_LAST_UPDATE_TIMER, lowcore, last_update_timer);
+	OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock);
+	OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
+	OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
+	OFFSET(__LC_CURRENT, lowcore, current_task);
+	OFFSET(__LC_THREAD_INFO, lowcore, thread_info);
+	OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
+	OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
+	OFFSET(__LC_PANIC_STACK, lowcore, panic_stack);
+	OFFSET(__LC_RESTART_STACK, lowcore, restart_stack);
+	OFFSET(__LC_RESTART_FN, lowcore, restart_fn);
+	OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
+	OFFSET(__LC_RESTART_SOURCE, lowcore, restart_source);
+	OFFSET(__LC_USER_ASCE, lowcore, user_asce);
+	OFFSET(__LC_LPP, lowcore, lpp);
+	OFFSET(__LC_CURRENT_PID, lowcore, current_pid);
+	OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
+	OFFSET(__LC_VDSO_PER_CPU, lowcore, vdso_per_cpu_data);
+	OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
+	OFFSET(__LC_GMAP, lowcore, gmap);
+	OFFSET(__LC_PASTE, lowcore, paste);
 	/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
-	OFFSET(__LC_DUMP_REIPL, _lowcore, ipib);
+	OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
 	/* hardware defined lowcore locations 0x1000 - 0x18ff */
-	OFFSET(__LC_VX_SAVE_AREA_ADDR, _lowcore, vector_save_area_addr);
-	OFFSET(__LC_EXT_PARAMS2, _lowcore, ext_params2);
-	OFFSET(SAVE_AREA_BASE, _lowcore, floating_pt_save_area);
-	OFFSET(__LC_FPREGS_SAVE_AREA, _lowcore, floating_pt_save_area);
-	OFFSET(__LC_GPREGS_SAVE_AREA, _lowcore, gpregs_save_area);
-	OFFSET(__LC_PSW_SAVE_AREA, _lowcore, psw_save_area);
-	OFFSET(__LC_PREFIX_SAVE_AREA, _lowcore, prefixreg_save_area);
-	OFFSET(__LC_FP_CREG_SAVE_AREA, _lowcore, fpt_creg_save_area);
-	OFFSET(__LC_CPU_TIMER_SAVE_AREA, _lowcore, cpu_timer_save_area);
-	OFFSET(__LC_CLOCK_COMP_SAVE_AREA, _lowcore, clock_comp_save_area);
-	OFFSET(__LC_AREGS_SAVE_AREA, _lowcore, access_regs_save_area);
-	OFFSET(__LC_CREGS_SAVE_AREA, _lowcore, cregs_save_area);
-	OFFSET(__LC_PGM_TDB, _lowcore, pgm_tdb);
+	OFFSET(__LC_VX_SAVE_AREA_ADDR, lowcore, vector_save_area_addr);
+	OFFSET(__LC_EXT_PARAMS2, lowcore, ext_params2);
+	OFFSET(__LC_FPREGS_SAVE_AREA, lowcore, floating_pt_save_area);
+	OFFSET(__LC_GPREGS_SAVE_AREA, lowcore, gpregs_save_area);
+	OFFSET(__LC_PSW_SAVE_AREA, lowcore, psw_save_area);
+	OFFSET(__LC_PREFIX_SAVE_AREA, lowcore, prefixreg_save_area);
+	OFFSET(__LC_FP_CREG_SAVE_AREA, lowcore, fpt_creg_save_area);
+	OFFSET(__LC_TOD_PROGREG_SAVE_AREA, lowcore, tod_progreg_save_area);
+	OFFSET(__LC_CPU_TIMER_SAVE_AREA, lowcore, cpu_timer_save_area);
+	OFFSET(__LC_CLOCK_COMP_SAVE_AREA, lowcore, clock_comp_save_area);
+	OFFSET(__LC_AREGS_SAVE_AREA, lowcore, access_regs_save_area);
+	OFFSET(__LC_CREGS_SAVE_AREA, lowcore, cregs_save_area);
+	OFFSET(__LC_PGM_TDB, lowcore, pgm_tdb);
 	BLANK();
 	/* gmap/sie offsets */
 	OFFSET(__GMAP_ASCE, gmap, asce);

+ 201 - 253
arch/s390/kernel/crash_dump.c

@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/bootmem.h>
 #include <linux/elf.h>
+#include <asm/asm-offsets.h>
 #include <linux/memblock.h>
 #include <asm/os_info.h>
 #include <asm/elf.h>
@@ -32,7 +33,84 @@ static struct memblock_type oldmem_type = {
 	.regions = &oldmem_region,
 };
 
-struct dump_save_areas dump_save_areas;
+struct save_area {
+	struct list_head list;
+	u64 psw[2];
+	u64 ctrs[16];
+	u64 gprs[16];
+	u32 acrs[16];
+	u64 fprs[16];
+	u32 fpc;
+	u32 prefix;
+	u64 todpreg;
+	u64 timer;
+	u64 todcmp;
+	u64 vxrs_low[16];
+	__vector128 vxrs_high[16];
+};
+
+static LIST_HEAD(dump_save_areas);
+
+/*
+ * Allocate a save area
+ */
+struct save_area * __init save_area_alloc(bool is_boot_cpu)
+{
+	struct save_area *sa;
+
+	sa = (void *) memblock_alloc(sizeof(*sa), 8);
+	if (!sa)
+		return NULL;
+	if (is_boot_cpu)
+		list_add(&sa->list, &dump_save_areas);
+	else
+		list_add_tail(&sa->list, &dump_save_areas);
+	return sa;
+}
+
+/*
+ * Return the address of the save area for the boot CPU
+ */
+struct save_area * __init save_area_boot_cpu(void)
+{
+	if (list_empty(&dump_save_areas))
+		return NULL;
+	return list_first_entry(&dump_save_areas, struct save_area, list);
+}
+
+/*
+ * Copy CPU registers into the save area
+ */
+void __init save_area_add_regs(struct save_area *sa, void *regs)
+{
+	struct lowcore *lc;
+
+	lc = (struct lowcore *)(regs - __LC_FPREGS_SAVE_AREA);
+	memcpy(&sa->psw, &lc->psw_save_area, sizeof(sa->psw));
+	memcpy(&sa->ctrs, &lc->cregs_save_area, sizeof(sa->ctrs));
+	memcpy(&sa->gprs, &lc->gpregs_save_area, sizeof(sa->gprs));
+	memcpy(&sa->acrs, &lc->access_regs_save_area, sizeof(sa->acrs));
+	memcpy(&sa->fprs, &lc->floating_pt_save_area, sizeof(sa->fprs));
+	memcpy(&sa->fpc, &lc->fpt_creg_save_area, sizeof(sa->fpc));
+	memcpy(&sa->prefix, &lc->prefixreg_save_area, sizeof(sa->prefix));
+	memcpy(&sa->todpreg, &lc->tod_progreg_save_area, sizeof(sa->todpreg));
+	memcpy(&sa->timer, &lc->cpu_timer_save_area, sizeof(sa->timer));
+	memcpy(&sa->todcmp, &lc->clock_comp_save_area, sizeof(sa->todcmp));
+}
+
+/*
+ * Copy vector registers into the save area
+ */
+void __init save_area_add_vxrs(struct save_area *sa, __vector128 *vxrs)
+{
+	int i;
+
+	/* Copy lower halves of vector registers 0-15 */
+	for (i = 0; i < 16; i++)
+		memcpy(&sa->vxrs_low[i], &vxrs[i].u[2], 8);
+	/* Copy vector registers 16-31 */
+	memcpy(sa->vxrs_high, vxrs + 16, 16 * sizeof(__vector128));
+}
 
 /*
  * Return physical address for virtual address
@@ -51,79 +129,85 @@ static inline void *load_real_addr(void *addr)
 }
 
 /*
- * Copy real to virtual or real memory
- */
-static int copy_from_realmem(void *dest, void *src, size_t count)
-{
-	unsigned long size;
-
-	if (!count)
-		return 0;
-	if (!is_vmalloc_or_module_addr(dest))
-		return memcpy_real(dest, src, count);
-	do {
-		size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK));
-		if (memcpy_real(load_real_addr(dest), src, size))
-			return -EFAULT;
-		count -= size;
-		dest += size;
-		src += size;
-	} while (count);
-	return 0;
-}
-
-/*
- * Pointer to ELF header in new kernel
- */
-static void *elfcorehdr_newmem;
-
-/*
- * Copy one page from zfcpdump "oldmem"
- *
- * For pages below HSA size memory from the HSA is copied. Otherwise
- * real memory copy is used.
+ * Copy memory of the old, dumped system to a kernel space virtual address
  */
-static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize,
-					 unsigned long src, int userbuf)
+int copy_oldmem_kernel(void *dst, void *src, size_t count)
 {
+	unsigned long from, len;
+	void *ra;
 	int rc;
 
-	if (src < sclp.hsa_size) {
-		rc = memcpy_hsa(buf, src, csize, userbuf);
-	} else {
-		if (userbuf)
-			rc = copy_to_user_real((void __force __user *) buf,
-					       (void *) src, csize);
-		else
-			rc = memcpy_real(buf, (void *) src, csize);
+	while (count) {
+		from = __pa(src);
+		if (!OLDMEM_BASE && from < sclp.hsa_size) {
+			/* Copy from zfcpdump HSA area */
+			len = min(count, sclp.hsa_size - from);
+			rc = memcpy_hsa_kernel(dst, from, len);
+			if (rc)
+				return rc;
+		} else {
+			/* Check for swapped kdump oldmem areas */
+			if (OLDMEM_BASE && from - OLDMEM_BASE < OLDMEM_SIZE) {
+				from -= OLDMEM_BASE;
+				len = min(count, OLDMEM_SIZE - from);
+			} else if (OLDMEM_BASE && from < OLDMEM_SIZE) {
+				len = min(count, OLDMEM_SIZE - from);
+				from += OLDMEM_BASE;
+			} else {
+				len = count;
+			}
+			if (is_vmalloc_or_module_addr(dst)) {
+				ra = load_real_addr(dst);
+				len = min(PAGE_SIZE - offset_in_page(ra), len);
+			} else {
+				ra = dst;
+			}
+			if (memcpy_real(ra, (void *) from, len))
+				return -EFAULT;
+		}
+		dst += len;
+		src += len;
+		count -= len;
 	}
-	return rc ? rc : csize;
+	return 0;
 }
 
 /*
- * Copy one page from kdump "oldmem"
- *
- * For the kdump reserved memory this functions performs a swap operation:
- *  - [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0 - OLDMEM_SIZE].
- *  - [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
+ * Copy memory of the old, dumped system to a user space virtual address
  */
-static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
-				      unsigned long src, int userbuf)
-
+int copy_oldmem_user(void __user *dst, void *src, size_t count)
 {
+	unsigned long from, len;
 	int rc;
 
-	if (src < OLDMEM_SIZE)
-		src += OLDMEM_BASE;
-	else if (src > OLDMEM_BASE &&
-		 src < OLDMEM_BASE + OLDMEM_SIZE)
-		src -= OLDMEM_BASE;
-	if (userbuf)
-		rc = copy_to_user_real((void __force __user *) buf,
-				       (void *) src, csize);
-	else
-		rc = copy_from_realmem(buf, (void *) src, csize);
-	return (rc == 0) ? rc : csize;
+	while (count) {
+		from = __pa(src);
+		if (!OLDMEM_BASE && from < sclp.hsa_size) {
+			/* Copy from zfcpdump HSA area */
+			len = min(count, sclp.hsa_size - from);
+			rc = memcpy_hsa_user(dst, from, len);
+			if (rc)
+				return rc;
+		} else {
+			/* Check for swapped kdump oldmem areas */
+			if (OLDMEM_BASE && from - OLDMEM_BASE < OLDMEM_SIZE) {
+				from -= OLDMEM_BASE;
+				len = min(count, OLDMEM_SIZE - from);
+			} else if (OLDMEM_BASE && from < OLDMEM_SIZE) {
+				len = min(count, OLDMEM_SIZE - from);
+				from += OLDMEM_BASE;
+			} else {
+				len = count;
+			}
+			rc = copy_to_user_real(dst, (void *) from, count);
+			if (rc)
+				return rc;
+		}
+		dst += len;
+		src += len;
+		count -= len;
+	}
+	return 0;
 }
 
 /*
@@ -132,15 +216,17 @@ static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
 ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
 			 unsigned long offset, int userbuf)
 {
-	unsigned long src;
+	void *src;
+	int rc;
 
 	if (!csize)
 		return 0;
-	src = (pfn << PAGE_SHIFT) + offset;
-	if (OLDMEM_BASE)
-		return copy_oldmem_page_kdump(buf, csize, src, userbuf);
+	src = (void *) (pfn << PAGE_SHIFT) + offset;
+	if (userbuf)
+		rc = copy_oldmem_user((void __force __user *) buf, src, csize);
 	else
-		return copy_oldmem_page_zfcpdump(buf, csize, src, userbuf);
+		rc = copy_oldmem_kernel((void *) buf, src, csize);
+	return rc;
 }
 
 /*
@@ -208,33 +294,6 @@ int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
 						       prot);
 }
 
-/*
- * Copy memory from old kernel
- */
-int copy_from_oldmem(void *dest, void *src, size_t count)
-{
-	unsigned long copied = 0;
-	int rc;
-
-	if (OLDMEM_BASE) {
-		if ((unsigned long) src < OLDMEM_SIZE) {
-			copied = min(count, OLDMEM_SIZE - (unsigned long) src);
-			rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied);
-			if (rc)
-				return rc;
-		}
-	} else {
-		unsigned long hsa_end = sclp.hsa_size;
-		if ((unsigned long) src < hsa_end) {
-			copied = min(count, hsa_end - (unsigned long) src);
-			rc = memcpy_hsa(dest, (unsigned long) src, copied, 0);
-			if (rc)
-				return rc;
-		}
-	}
-	return copy_from_realmem(dest + copied, src + copied, count - copied);
-}
-
 /*
  * Alloc memory and panic in case of ENOMEM
  */
@@ -251,8 +310,8 @@ static void *kzalloc_panic(int len)
 /*
  * Initialize ELF note
  */
-static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len,
-		     const char *name)
+static void *nt_init_name(void *buf, Elf64_Word type, void *desc, int d_len,
+			  const char *name)
 {
 	Elf64_Nhdr *note;
 	u64 len;
@@ -272,136 +331,42 @@ static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len,
 	return PTR_ADD(buf, len);
 }
 
-/*
- * Initialize prstatus note
- */
-static void *nt_prstatus(void *ptr, struct save_area *sa)
+static inline void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len)
 {
-	struct elf_prstatus nt_prstatus;
-	static int cpu_nr = 1;
-
-	memset(&nt_prstatus, 0, sizeof(nt_prstatus));
-	memcpy(&nt_prstatus.pr_reg.gprs, sa->gp_regs, sizeof(sa->gp_regs));
-	memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw));
-	memcpy(&nt_prstatus.pr_reg.acrs, sa->acc_regs, sizeof(sa->acc_regs));
-	nt_prstatus.pr_pid = cpu_nr;
-	cpu_nr++;
-
-	return nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus),
-			 "CORE");
+	return nt_init_name(buf, type, desc, d_len, KEXEC_CORE_NOTE_NAME);
 }
 
 /*
- * Initialize fpregset (floating point) note
+ * Fill ELF notes for one CPU with save area registers
  */
-static void *nt_fpregset(void *ptr, struct save_area *sa)
+static void *fill_cpu_elf_notes(void *ptr, int cpu, struct save_area *sa)
 {
+	struct elf_prstatus nt_prstatus;
 	elf_fpregset_t nt_fpregset;
 
+	/* Prepare prstatus note */
+	memset(&nt_prstatus, 0, sizeof(nt_prstatus));
+	memcpy(&nt_prstatus.pr_reg.gprs, sa->gprs, sizeof(sa->gprs));
+	memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw));
+	memcpy(&nt_prstatus.pr_reg.acrs, sa->acrs, sizeof(sa->acrs));
+	nt_prstatus.pr_pid = cpu;
+	/* Prepare fpregset (floating point) note */
 	memset(&nt_fpregset, 0, sizeof(nt_fpregset));
-	memcpy(&nt_fpregset.fpc, &sa->fp_ctrl_reg, sizeof(sa->fp_ctrl_reg));
-	memcpy(&nt_fpregset.fprs, &sa->fp_regs, sizeof(sa->fp_regs));
-
-	return nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset),
-		       "CORE");
-}
-
-/*
- * Initialize timer note
- */
-static void *nt_s390_timer(void *ptr, struct save_area *sa)
-{
-	return nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer),
-			 KEXEC_CORE_NOTE_NAME);
-}
-
-/*
- * Initialize TOD clock comparator note
- */
-static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa)
-{
-	return nt_init(ptr, NT_S390_TODCMP, &sa->clk_cmp,
-		       sizeof(sa->clk_cmp), KEXEC_CORE_NOTE_NAME);
-}
-
-/*
- * Initialize TOD programmable register note
- */
-static void *nt_s390_tod_preg(void *ptr, struct save_area *sa)
-{
-	return nt_init(ptr, NT_S390_TODPREG, &sa->tod_reg,
-		       sizeof(sa->tod_reg), KEXEC_CORE_NOTE_NAME);
-}
-
-/*
- * Initialize control register note
- */
-static void *nt_s390_ctrs(void *ptr, struct save_area *sa)
-{
-	return nt_init(ptr, NT_S390_CTRS, &sa->ctrl_regs,
-		       sizeof(sa->ctrl_regs), KEXEC_CORE_NOTE_NAME);
-}
-
-/*
- * Initialize prefix register note
- */
-static void *nt_s390_prefix(void *ptr, struct save_area *sa)
-{
-	return nt_init(ptr, NT_S390_PREFIX, &sa->pref_reg,
-			 sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME);
-}
-
-/*
- * Initialize vxrs high note (full 128 bit VX registers 16-31)
- */
-static void *nt_s390_vx_high(void *ptr, __vector128 *vx_regs)
-{
-	return nt_init(ptr, NT_S390_VXRS_HIGH, &vx_regs[16],
-		       16 * sizeof(__vector128), KEXEC_CORE_NOTE_NAME);
-}
-
-/*
- * Initialize vxrs low note (lower halves of VX registers 0-15)
- */
-static void *nt_s390_vx_low(void *ptr, __vector128 *vx_regs)
-{
-	Elf64_Nhdr *note;
-	u64 len;
-	int i;
-
-	note = (Elf64_Nhdr *)ptr;
-	note->n_namesz = strlen(KEXEC_CORE_NOTE_NAME) + 1;
-	note->n_descsz = 16 * 8;
-	note->n_type = NT_S390_VXRS_LOW;
-	len = sizeof(Elf64_Nhdr);
-
-	memcpy(ptr + len, KEXEC_CORE_NOTE_NAME, note->n_namesz);
-	len = roundup(len + note->n_namesz, 4);
-
-	ptr += len;
-	/* Copy lower halves of SIMD registers 0-15 */
-	for (i = 0; i < 16; i++) {
-		memcpy(ptr, &vx_regs[i].u[2], 8);
-		ptr += 8;
-	}
-	return ptr;
-}
-
-/*
- * Fill ELF notes for one CPU with save area registers
- */
-void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vx_regs)
-{
-	ptr = nt_prstatus(ptr, sa);
-	ptr = nt_fpregset(ptr, sa);
-	ptr = nt_s390_timer(ptr, sa);
-	ptr = nt_s390_tod_cmp(ptr, sa);
-	ptr = nt_s390_tod_preg(ptr, sa);
-	ptr = nt_s390_ctrs(ptr, sa);
-	ptr = nt_s390_prefix(ptr, sa);
-	if (MACHINE_HAS_VX && vx_regs) {
-		ptr = nt_s390_vx_low(ptr, vx_regs);
-		ptr = nt_s390_vx_high(ptr, vx_regs);
+	memcpy(&nt_fpregset.fpc, &sa->fpc, sizeof(sa->fpc));
+	memcpy(&nt_fpregset.fprs, &sa->fprs, sizeof(sa->fprs));
+	/* Create ELF notes for the CPU */
+	ptr = nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus));
+	ptr = nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset));
+	ptr = nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer));
+	ptr = nt_init(ptr, NT_S390_TODCMP, &sa->todcmp, sizeof(sa->todcmp));
+	ptr = nt_init(ptr, NT_S390_TODPREG, &sa->todpreg, sizeof(sa->todpreg));
+	ptr = nt_init(ptr, NT_S390_CTRS, &sa->ctrs, sizeof(sa->ctrs));
+	ptr = nt_init(ptr, NT_S390_PREFIX, &sa->prefix, sizeof(sa->prefix));
+	if (MACHINE_HAS_VX) {
+		ptr = nt_init(ptr, NT_S390_VXRS_HIGH,
+			      &sa->vxrs_high, sizeof(sa->vxrs_high));
+		ptr = nt_init(ptr, NT_S390_VXRS_LOW,
+			      &sa->vxrs_low, sizeof(sa->vxrs_low));
 	}
 	return ptr;
 }
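
nt_init_name() lays each note out as an Elf64_Nhdr followed by the name and the descriptor, each padded to a 4-byte boundary. A small helper showing the buffer space one such note occupies (our sketch, assuming the standard <elf.h> definitions; it is not part of the patch):

#include <elf.h>
#include <string.h>

static size_t nt_size(const char *name, size_t d_len)
{
	return sizeof(Elf64_Nhdr)
	       + ((strlen(name) + 1 + 3) & ~3UL)	/* name incl. NUL, padded to 4 */
	       + ((d_len + 3) & ~3UL);			/* descriptor, padded to 4 */
}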
@@ -416,8 +381,7 @@ static void *nt_prpsinfo(void *ptr)
 	memset(&prpsinfo, 0, sizeof(prpsinfo));
 	prpsinfo.pr_sname = 'R';
 	strcpy(prpsinfo.pr_fname, "vmlinux");
-	return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo),
-		       KEXEC_CORE_NOTE_NAME);
+	return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo));
 }
 
 /*
@@ -429,17 +393,18 @@ static void *get_vmcoreinfo_old(unsigned long *size)
 	Elf64_Nhdr note;
 	void *addr;
 
-	if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
+	if (copy_oldmem_kernel(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
 		return NULL;
 	memset(nt_name, 0, sizeof(nt_name));
-	if (copy_from_oldmem(&note, addr, sizeof(note)))
+	if (copy_oldmem_kernel(&note, addr, sizeof(note)))
 		return NULL;
-	if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1))
+	if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
+			       sizeof(nt_name) - 1))
 		return NULL;
 	if (strcmp(nt_name, "VMCOREINFO") != 0)
 		return NULL;
 	vmcoreinfo = kzalloc_panic(note.n_descsz);
-	if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz))
+	if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz))
 		return NULL;
 	*size = note.n_descsz;
 	return vmcoreinfo;
@@ -458,7 +423,7 @@ static void *nt_vmcoreinfo(void *ptr)
 		vmcoreinfo = get_vmcoreinfo_old(&size);
 	if (!vmcoreinfo)
 		return ptr;
-	return nt_init(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
+	return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
 }
 
 /*
@@ -487,13 +452,12 @@ static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
  */
 static int get_cpu_cnt(void)
 {
-	int i, cpus = 0;
+	struct save_area *sa;
+	int cpus = 0;
 
-	for (i = 0; i < dump_save_areas.count; i++) {
-		if (dump_save_areas.areas[i]->sa.pref_reg == 0)
-			continue;
-		cpus++;
-	}
+	list_for_each_entry(sa, &dump_save_areas, list)
+		if (sa->prefix != 0)
+			cpus++;
 	return cpus;
 }
 
@@ -538,18 +502,16 @@ static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
  */
 static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
 {
-	struct save_area_ext *sa_ext;
+	struct save_area *sa;
 	void *ptr_start = ptr;
-	int i;
+	int cpu;
 
 	ptr = nt_prpsinfo(ptr);
 
-	for (i = 0; i < dump_save_areas.count; i++) {
-		sa_ext = dump_save_areas.areas[i];
-		if (sa_ext->sa.pref_reg == 0)
-			continue;
-		ptr = fill_cpu_elf_notes(ptr, &sa_ext->sa, sa_ext->vx_regs);
-	}
+	cpu = 1;
+	list_for_each_entry(sa, &dump_save_areas, list)
+		if (sa->prefix != 0)
+			ptr = fill_cpu_elf_notes(ptr, cpu++, sa);
 	ptr = nt_vmcoreinfo(ptr);
 	memset(phdr, 0, sizeof(*phdr));
 	phdr->p_type = PT_NOTE;
@@ -573,9 +535,6 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 	/* If we are not in kdump or zfcpdump mode return */
 	if (!OLDMEM_BASE && ipl_info.type != IPL_TYPE_FCP_DUMP)
 		return 0;
-	/* If elfcorehdr= has been passed via cmdline, we use that one */
-	if (elfcorehdr_addr != ELFCORE_ADDR_MAX)
-		return 0;
 	/* If we cannot get HSA size for zfcpdump return error */
 	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp.hsa_size)
 		return -ENODEV;
@@ -606,7 +565,6 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 	hdr_off = PTR_DIFF(ptr, hdr);
 	loads_init(phdr_loads, hdr_off);
 	*addr = (unsigned long long) hdr;
-	elfcorehdr_newmem = hdr;
 	*size = (unsigned long long) hdr_off;
 	BUG_ON(elfcorehdr_size > alloc_size);
 	return 0;
@@ -617,8 +575,6 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
  */
 void elfcorehdr_free(unsigned long long addr)
 {
-	if (!elfcorehdr_newmem)
-		return;
 	kfree((void *)(unsigned long)addr);
 }
 
@@ -629,7 +585,6 @@ ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
 	void *src = (void *)(unsigned long)*ppos;
 
-	src = elfcorehdr_newmem ? src : src - OLDMEM_BASE;
 	memcpy(buf, src, count);
 	*ppos += count;
 	return count;
@@ -641,15 +596,8 @@ ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
 {
 	void *src = (void *)(unsigned long)*ppos;
-	int rc;
 
-	if (elfcorehdr_newmem) {
-		memcpy(buf, src, count);
-	} else {
-		rc = copy_from_oldmem(buf, src, count);
-		if (rc)
-			return rc;
-	}
+	memcpy(buf, src, count);
 	*ppos += count;
 	return count;
 }

+ 2 - 2
arch/s390/kernel/dis.c

@@ -2022,7 +2022,7 @@ void show_code(struct pt_regs *regs)
 			*ptr++ = '\t';
 		ptr += print_insn(ptr, code + start, addr);
 		start += opsize;
-		printk(buffer);
+		printk("%s", buffer);
 		ptr = buffer;
 		ptr += sprintf(ptr, "\n          ");
 		hops++;
@@ -2049,7 +2049,7 @@ void print_fn_code(unsigned char *code, unsigned long len)
 		ptr += print_insn(ptr, code, (unsigned long) code);
 		*ptr++ = '\n';
 		*ptr++ = 0;
-		printk(buffer);
+		printk("%s", buffer);
 		code += opsize;
 		len -= opsize;
 	}
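
Both printk() changes are format-string fixes: the disassembly buffer routinely contains '%' characters (register operands such as "%r1"), which the old calls would hand to printk as conversion specifiers. A contrived illustration (ours, not from the patch):

	char buffer[32] = "lgr\t%r1,%r2";	/* operands contain '%' */

	printk(buffer);		/* unsafe: '%' sequences are parsed as conversions */
	printk("%s", buffer);	/* safe: the text is printed verbatim */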

+ 9 - 0
arch/s390/kernel/early.c

@@ -335,6 +335,14 @@ static __init void detect_machine_facilities(void)
 	}
 }
 
+static inline void save_vector_registers(void)
+{
+#ifdef CONFIG_CRASH_DUMP
+	if (test_facility(129))
+		save_vx_regs(boot_cpu_vector_save_area);
+#endif
+}
+
 static int __init disable_vector_extension(char *str)
 {
 	S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
@@ -451,6 +459,7 @@ void __init startup_init(void)
 	detect_diag9c();
 	detect_diag44();
 	detect_machine_facilities();
+	save_vector_registers();
 	setup_topology();
 	sclp_early_detect();
 	lockdep_on();

+ 2 - 0
arch/s390/kernel/entry.S

@@ -764,6 +764,7 @@ ENTRY(psw_idle)
 	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
 .Lpsw_idle_stcctm:
 #endif
+	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
 	STCK	__CLOCK_IDLE_ENTER(%r2)
 	stpt	__TIMER_IDLE_ENTER(%r2)
 .Lpsw_idle_lpsw:
@@ -1146,6 +1147,7 @@ cleanup_critical:
 	.quad	.Lio_done - 4
 
 .Lcleanup_idle:
+	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
 	# copy interrupt clock & cpu timer
 	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
 	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER

+ 17 - 30
arch/s390/kernel/head.S

@@ -25,6 +25,7 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
+#include <asm/facility.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
 
@@ -300,27 +301,27 @@ ENTRY(startup_kdump)
 	xc	0x200(256),0x200	# partially clear lowcore
 	xc	0x300(256),0x300
 	xc	0xe00(256),0xe00
+	xc	0xf00(256),0xf00
 	lctlg	%c0,%c15,0x200(%r0)	# initialize control registers
 	stck	__LC_LAST_UPDATE_CLOCK
 	spt	6f-.LPG0(%r13)
 	mvc	__LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
-	xc	__LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
-	# check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
-	.insn	s,0xb2b10000,0		# store facilities @ __LC_STFL_FAC_LIST
-	tm	__LC_STFL_FAC_LIST,0x01	# stfle available ?
+	stfl	0(%r0)			# store facilities @ __LC_STFL_FAC_LIST
+	mvc	__LC_STFLE_FAC_LIST(4),__LC_STFL_FAC_LIST
+	tm	__LC_STFLE_FAC_LIST,0x01	# stfle available ?
 	jz	0f
-	la	%r0,1
-	.insn	s,0xb2b00000,__LC_STFL_FAC_LIST	# store facility list extended
+	lghi	%r0,FACILITIES_ALS_DWORDS-1
+	.insn	s,0xb2b00000,__LC_STFLE_FAC_LIST # store facility list extended
 	# verify if all required facilities are supported by the machine
-0:	la	%r1,__LC_STFL_FAC_LIST
+0:	la	%r1,__LC_STFLE_FAC_LIST
 	la	%r2,3f+8-.LPG0(%r13)
-	l	%r3,0(%r2)
-1:	l	%r0,0(%r1)
-	n	%r0,4(%r2)
-	cl	%r0,4(%r2)
+	lhi	%r3,FACILITIES_ALS_DWORDS
+1:	lg	%r0,0(%r1)
+	ng	%r0,0(%r2)
+	clg	%r0,0(%r2)
 	jne	2f
-	la	%r1,4(%r1)
-	la	%r2,4(%r2)
+	la	%r1,8(%r1)
+	la	%r2,8(%r2)
 	ahi	%r3,-1
 	jnz	1b
 	j	4f
@@ -340,24 +341,10 @@ ENTRY(startup_kdump)
 3:	.long	0x000a0000,0x8badcccc
 
 # List of facilities that are required. If not all facilities are present
-# the kernel will crash. Format is number of facility words with bits set,
-# followed by the facility words.
+# the kernel will crash.
+
+	.quad FACILITIES_ALS
 
-#if defined(CONFIG_MARCH_Z13)
-	.long 2, 0xc100eff2, 0xf46cc800
-#elif defined(CONFIG_MARCH_ZEC12)
-	.long 2, 0xc100eff2, 0xf46cc800
-#elif defined(CONFIG_MARCH_Z196)
-	.long 2, 0xc100eff2, 0xf46c0000
-#elif defined(CONFIG_MARCH_Z10)
-	.long 2, 0xc100eff2, 0xf0680000
-#elif defined(CONFIG_MARCH_Z9_109)
-	.long 1, 0xc100efc2
-#elif defined(CONFIG_MARCH_Z990)
-	.long 1, 0xc0002000
-#elif defined(CONFIG_MARCH_Z900)
-	.long 1, 0xc0000000
-#endif
 4:
 	/* Continue with startup code in head64.S */
 	jg	startup_continue
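
In C terms, the reworked facility check in startup_kdump amounts to the loop below. This is a sketch under our own naming: stfle_fac_list stands for the doublewords stored at __LC_STFLE_FAC_LIST, facilities_als for the generated FACILITIES_ALS constant, and die_too_old_machine() for loading the disabled-wait PSW at label 3.

	int i;

	for (i = 0; i < FACILITIES_ALS_DWORDS; i++) {
		/* every required bit must also be set in the stored facility list */
		if ((stfle_fac_list[i] & facilities_als[i]) != facilities_als[i])
			die_too_old_machine();	/* hypothetical stand-in, see above */
	}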

+ 1 - 1
arch/s390/kernel/head64.S

@@ -16,7 +16,7 @@
 
 __HEAD
 ENTRY(startup_continue)
-	tm	__LC_STFL_FAC_LIST+6,0x80	# LPP available ?
+	tm	__LC_STFLE_FAC_LIST+6,0x80	# LPP available ?
 	jz	0f
 	xc	__LC_LPP+1(7,0),__LC_LPP+1	# clear lpp and current_pid
 	mvi	__LC_LPP,0x80			#   and set LPP_MAGIC

+ 3 - 18
arch/s390/kernel/ipl.c

@@ -2039,21 +2039,15 @@ static void do_reset_calls(void)
 		reset->fn();
 }
 
-u32 dump_prefix_page;
-
-void s390_reset_system(void (*fn_pre)(void),
-		       void (*fn_post)(void *), void *data)
+void s390_reset_system(void)
 {
-	struct _lowcore *lc;
+	struct lowcore *lc;
 
-	lc = (struct _lowcore *)(unsigned long) store_prefix();
+	lc = (struct lowcore *)(unsigned long) store_prefix();
 
 	/* Stack for interrupt/machine check handler */
 	lc->panic_stack = S390_lowcore.panic_stack;
 
-	/* Save prefix page address for dump case */
-	dump_prefix_page = (u32)(unsigned long) lc;
-
 	/* Disable prefixing */
 	set_prefix(0);
 
@@ -2077,14 +2071,5 @@ void s390_reset_system(void (*fn_pre)(void),
 	S390_lowcore.subchannel_id = 0;
 	S390_lowcore.subchannel_nr = 0;
 
-	/* Store status at absolute zero */
-	store_status();
-
-	/* Call function before reset */
-	if (fn_pre)
-		fn_pre();
 	do_reset_calls();
-	/* Call function after reset */
-	if (fn_post)
-		fn_post(data);
 }

+ 61 - 49
arch/s390/kernel/machine_kexec.c

@@ -34,46 +34,6 @@ extern const unsigned long long relocate_kernel_len;
 
 #ifdef CONFIG_CRASH_DUMP
 
-/*
- * Create ELF notes for one CPU
- */
-static void add_elf_notes(int cpu)
-{
-	struct save_area *sa = (void *) 4608 + store_prefix();
-	void *ptr;
-
-	memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa));
-	ptr = (u64 *) per_cpu_ptr(crash_notes, cpu);
-	ptr = fill_cpu_elf_notes(ptr, sa, NULL);
-	memset(ptr, 0, sizeof(struct elf_note));
-}
-
-/*
- * Initialize CPU ELF notes
- */
-static void setup_regs(void)
-{
-	unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
-	struct _lowcore *lc;
-	int cpu, this_cpu;
-
-	/* Get lowcore pointer from store status of this CPU (absolute zero) */
-	lc = (struct _lowcore *)(unsigned long)S390_lowcore.prefixreg_save_area;
-	this_cpu = smp_find_processor_id(stap());
-	add_elf_notes(this_cpu);
-	for_each_online_cpu(cpu) {
-		if (cpu == this_cpu)
-			continue;
-		if (smp_store_status(cpu))
-			continue;
-		add_elf_notes(cpu);
-	}
-	if (MACHINE_HAS_VX)
-		save_vx_regs_safe((void *) lc->vector_save_area_addr);
-	/* Copy dump CPU store status info to absolute zero */
-	memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
-}
-
 /*
  * PM notifier callback for kdump
  */
@@ -105,14 +65,66 @@ static int __init machine_kdump_pm_init(void)
 arch_initcall(machine_kdump_pm_init);
 
 /*
- * Start kdump: We expect here that a store status has been done on our CPU
+ * Reset the system, copy boot CPU registers to absolute zero,
+ * and jump to the kdump image
  */
 static void __do_machine_kdump(void *image)
 {
-	int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
+	int (*start_kdump)(int);
+	unsigned long prefix;
+
+	/* store_status() saved the prefix register to lowcore */
+	prefix = (unsigned long) S390_lowcore.prefixreg_save_area;
+
+	/* Now do the reset  */
+	s390_reset_system();
+
+	/*
+	 * Copy dump CPU store status info to absolute zero.
+	 * This needs to be done *after* s390_reset_system() has set the
+	 * prefix register of this CPU to zero.
+	 */
+	memcpy((void *) __LC_FPREGS_SAVE_AREA,
+	       (void *)(prefix + __LC_FPREGS_SAVE_AREA), 512);
 
 	__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
+	start_kdump = (void *)((struct kimage *) image)->start;
 	start_kdump(1);
+
+	/* Die if start_kdump returns */
+	disabled_wait((unsigned long) __builtin_return_address(0));
+}
+
+/*
+ * Start kdump: create an LGR log entry, store status of all CPUs and
+ * branch to __do_machine_kdump.
+ */
+static noinline void __machine_kdump(void *image)
+{
+	int this_cpu, cpu;
+
+	lgr_info_log();
+	/* Get status of the other CPUs */
+	this_cpu = smp_find_processor_id(stap());
+	for_each_online_cpu(cpu) {
+		if (cpu == this_cpu)
+			continue;
+		if (smp_store_status(cpu))
+			continue;
+	}
+	/* Store status of the boot CPU */
+	if (MACHINE_HAS_VX)
+		save_vx_regs((void *) &S390_lowcore.vector_save_area);
+	/*
+	 * To create a good backchain for this CPU in the dump, store_status()
+	 * is passed the address of a function. The address is saved into
+	 * the PSW save area of the boot CPU and the function is invoked as
+	 * a tail call of store_status(). The backchain in the dump will look
+	 * like this:
+	 *   restart_int_handler -> __machine_kexec -> __do_machine_kdump
+	 * The call to store_status() will not return.
+	 */
+	store_status(__do_machine_kdump, image);
 }
 #endif
 
@@ -235,10 +247,14 @@ static void __do_machine_kexec(void *data)
 	relocate_kernel_t data_mover;
 	struct kimage *image = data;
 
+	s390_reset_system();
 	data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
 
 	/* Call the moving routine */
 	(*data_mover)(&image->head, image->start);
+
+	/* Die if kexec returns */
+	disabled_wait((unsigned long) __builtin_return_address(0));
 }
 
 /*
@@ -251,14 +267,10 @@ static void __machine_kexec(void *data)
 	tracing_off();
 	debug_locks_off();
 #ifdef CONFIG_CRASH_DUMP
-	if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH) {
-
-		lgr_info_log();
-		s390_reset_system(setup_regs, __do_machine_kdump, data);
-	} else
+	if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH)
+		__machine_kdump(data);
 #endif
-		s390_reset_system(NULL, __do_machine_kexec, data);
-	disabled_wait((unsigned long) __builtin_return_address(0));
+	__do_machine_kexec(data);
 }
 
 /*

+ 4 - 3
arch/s390/kernel/os_info.c

@@ -89,7 +89,7 @@ static void os_info_old_alloc(int nr, int align)
 		goto fail;
 	}
 	buf_align = PTR_ALIGN(buf, align);
-	if (copy_from_oldmem(buf_align, (void *) addr, size)) {
+	if (copy_oldmem_kernel(buf_align, (void *) addr, size)) {
 		msg = "copy failed";
 		goto fail_free;
 	}
@@ -122,14 +122,15 @@ static void os_info_old_init(void)
 		return;
 	if (!OLDMEM_BASE)
 		goto fail;
-	if (copy_from_oldmem(&addr, &S390_lowcore.os_info, sizeof(addr)))
+	if (copy_oldmem_kernel(&addr, &S390_lowcore.os_info, sizeof(addr)))
 		goto fail;
 	if (addr == 0 || addr % PAGE_SIZE)
 		goto fail;
 	os_info_old = kzalloc(sizeof(*os_info_old), GFP_KERNEL);
 	if (!os_info_old)
 		goto fail;
-	if (copy_from_oldmem(os_info_old, (void *) addr, sizeof(*os_info_old)))
+	if (copy_oldmem_kernel(os_info_old, (void *) addr,
+			       sizeof(*os_info_old)))
 		goto fail_free;
 	if (os_info_old->magic != OS_INFO_MAGIC)
 		goto fail_free;

+ 51 - 43
arch/s390/kernel/reipl.S

@@ -9,60 +9,66 @@
 #include <asm/sigp.h>
 
 #
-# store_status
+# Issue "store status" for the current CPU to its prefix page
+# and call the passed function afterwards
 #
-# Prerequisites to run this function:
-# - Prefix register is set to zero
-# - Original prefix register is stored in "dump_prefix_page"
-# - Lowcore protection is off
+# r2 = Function to be called after store status
+# r3 = Parameter for function
 #
 ENTRY(store_status)
 	/* Save register one and load save area base */
 	stg	%r1,__LC_SAVE_AREA_RESTART
-	lghi	%r1,SAVE_AREA_BASE
 	/* General purpose registers */
-	stmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	lg	%r2,__LC_SAVE_AREA_RESTART
-	stg	%r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
+	lghi	%r1,__LC_GPREGS_SAVE_AREA
+	stmg	%r0,%r15,0(%r1)
+	mvc	8(8,%r1),__LC_SAVE_AREA_RESTART
 	/* Control registers */
-	stctg	%c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	lghi	%r1,__LC_CREGS_SAVE_AREA
+	stctg	%c0,%c15,0(%r1)
 	/* Access registers */
-	stam	%a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	lghi	%r1,__LC_AREGS_SAVE_AREA
+	stam	%a0,%a15,0(%r1)
 	/* Floating point registers */
-	std	%f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	lghi	%r1,__LC_FPREGS_SAVE_AREA
+	std	%f0, 0x00(%r1)
+	std	%f1, 0x08(%r1)
+	std	%f2, 0x10(%r1)
+	std	%f3, 0x18(%r1)
+	std	%f4, 0x20(%r1)
+	std	%f5, 0x28(%r1)
+	std	%f6, 0x30(%r1)
+	std	%f7, 0x38(%r1)
+	std	%f8, 0x40(%r1)
+	std	%f9, 0x48(%r1)
+	std	%f10,0x50(%r1)
+	std	%f11,0x58(%r1)
+	std	%f12,0x60(%r1)
+	std	%f13,0x68(%r1)
+	std	%f14,0x70(%r1)
+	std	%f15,0x78(%r1)
 	/* Floating point control register */
-	stfpc	__LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	lghi	%r1,__LC_FP_CREG_SAVE_AREA
+	stfpc	0(%r1)
 	/* CPU timer */
-	stpt	__LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	/* Saved prefix register */
-	larl	%r2,dump_prefix_page
-	mvc	__LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2)
+	lghi	%r1,__LC_CPU_TIMER_SAVE_AREA
+	stpt	0(%r1)
+	/* Store prefix register */
+	lghi	%r1,__LC_PREFIX_SAVE_AREA
+	stpx	0(%r1)
 	/* Clock comparator - seven bytes */
-	larl	%r2,.Lclkcmp
-	stckc	0(%r2)
-	mvc	__LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2)
+	lghi	%r1,__LC_CLOCK_COMP_SAVE_AREA
+	larl	%r4,.Lclkcmp
+	stckc	0(%r4)
+	mvc	1(7,%r1),1(%r4)
 	/* Program status word */
-	epsw	%r2,%r3
-	st	%r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1)
-	st	%r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
-	larl	%r2,store_status
-	stg	%r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
-	br	%r14
+	lghi	%r1,__LC_PSW_SAVE_AREA
+	epsw	%r4,%r5
+	st	%r4,0(%r1)
+	st	%r5,4(%r1)
+	stg	%r2,8(%r1)
+	lgr	%r1,%r2
+	lgr	%r2,%r3
+	br	%r1
 
 	.section .bss
 	.align	8
@@ -77,9 +83,11 @@ ENTRY(store_status)
 ENTRY(do_reipl_asm)
 		basr	%r13,0
 .Lpg0:		lpswe	.Lnewpsw-.Lpg0(%r13)
-.Lpg1:		brasl	%r14,store_status
+.Lpg1:		lgr	%r3,%r2
+		larl	%r2,.Lstatus
+		brasl	%r14,store_status
 
-		lctlg	%c6,%c6,.Lall-.Lpg0(%r13)
+.Lstatus:	lctlg	%c6,%c6,.Lall-.Lpg0(%r13)
 		lgr	%r1,%r2
 		mvc	__LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
 		stsch	.Lschib-.Lpg0(%r13)

+ 50 - 15
arch/s390/kernel/sclp.c

@@ -9,7 +9,11 @@
 #include <asm/processor.h>
 #include <asm/sclp.h>
 
+#define EVTYP_VT220MSG_MASK	0x00000040
+#define EVTYP_MSG_MASK		0x40000000
+
 static char _sclp_work_area[4096] __aligned(PAGE_SIZE);
+static bool have_vt220, have_linemode;
 
 static void _sclp_wait_int(void)
 {
@@ -68,7 +72,7 @@ static int _sclp_setup(int disable)
 		0x00, 0x1c,
 		0x00, 0x00, 0x00, 0x00,	0x00, 0x00, 0x00, 0x00,
 		0x00, 0x04,
-		0x80, 0x00, 0x00, 0x00,	0x40, 0x00, 0x00, 0x00,
+		0x80, 0x00, 0x00, 0x00,	0x40, 0x00, 0x00, 0x40,
 		0x00, 0x00, 0x00, 0x00,	0x00, 0x00, 0x00, 0x00
 	};
 	unsigned int *masks;
@@ -82,13 +86,13 @@ static int _sclp_setup(int disable)
 	rc = _sclp_servc(0x00780005, _sclp_work_area);
 	if (rc)
 		return rc;
-	if ((masks[0] & masks[3]) != masks[0] ||
-	    (masks[1] & masks[2]) != masks[1])
-		return -EIO;
+	have_vt220 = masks[2] & EVTYP_VT220MSG_MASK;
+	have_linemode = masks[2] & EVTYP_MSG_MASK;
 	return 0;
 }
 
-static int _sclp_print(const char *str)
+/* Output multi-line text using the SCLP Message interface. */
+static void _sclp_print_lm(const char *str)
 {
 	static unsigned char write_head[] = {
 		/* sccb header */
@@ -143,18 +147,49 @@ static int _sclp_print(const char *str)
 	} while (ch != 0);
 
 	/* SCLP write data */
-	return _sclp_servc(0x00760005, _sclp_work_area);
+	_sclp_servc(0x00760005, _sclp_work_area);
 }
 
-int _sclp_print_early(const char *str)
+/* Output multi-line text (plus a newline) using the SCLP VT220
+ * interface.
+ */
+static void _sclp_print_vt220(const char *str)
 {
-	int rc;
+	static unsigned char const write_head[] = {
+		/* sccb header */
+		0x00, 0x0e,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		/* evbuf header */
+		0x00, 0x06,
+		0x1a, 0x00, 0x00, 0x00,
+	};
+	size_t len = strlen(str);
 
-	rc = _sclp_setup(0);
-	if (rc)
-		return rc;
-	rc = _sclp_print(str);
-	if (rc)
-		return rc;
-	return _sclp_setup(1);
+	if (sizeof(write_head) + len >= sizeof(_sclp_work_area))
+		len = sizeof(_sclp_work_area) - sizeof(write_head) - 1;
+
+	memcpy(_sclp_work_area, write_head, sizeof(write_head));
+	memcpy(_sclp_work_area + sizeof(write_head), str, len);
+	_sclp_work_area[sizeof(write_head) + len] = '\n';
+
+	/* Update length fields in evbuf and sccb headers */
+	*(unsigned short *)(_sclp_work_area + 8) += len + 1;
+	*(unsigned short *)(_sclp_work_area + 0) += len + 1;
+
+	/* SCLP write data */
+	(void)_sclp_servc(0x00760005, _sclp_work_area);
+}
+
+/* Output one or more lines of text on the SCLP console (VT220 and/or
+ * line mode). All lines are terminated; no trailing LF is needed.
+ */
+void _sclp_print_early(const char *str)
+{
+	if (_sclp_setup(0) != 0)
+		return;
+	if (have_linemode)
+		_sclp_print_lm(str);
+	if (have_vt220)
+		_sclp_print_vt220(str);
+	_sclp_setup(1);
 }

+ 10 - 13
arch/s390/kernel/setup.c

@@ -99,7 +99,7 @@ unsigned long MODULES_VADDR;
 unsigned long MODULES_END;
 
 /* An array with a pointer to the lowcore of every CPU. */
-struct _lowcore *lowcore_ptr[NR_CPUS];
+struct lowcore *lowcore_ptr[NR_CPUS];
 EXPORT_SYMBOL(lowcore_ptr);
 
 /*
@@ -293,12 +293,12 @@ void *restart_stack __attribute__((__section__(".data")));
 
 static void __init setup_lowcore(void)
 {
-	struct _lowcore *lc;
+	struct lowcore *lc;
 
 	/*
 	 * Setup lowcore for boot cpu
 	 */
-	BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
+	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
 	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
 	lc->restart_psw.mask = PSW_KERNEL_BITS;
 	lc->restart_psw.addr =
@@ -663,15 +663,6 @@ static void __init reserve_kernel(void)
 #endif
 }
 
-static void __init reserve_elfcorehdr(void)
-{
-#ifdef CONFIG_CRASH_DUMP
-	if (is_kdump_kernel())
-		memblock_reserve(elfcorehdr_addr - OLDMEM_BASE,
-				 PAGE_ALIGN(elfcorehdr_size));
-#endif
-}
-
 static void __init setup_memory(void)
 {
 	struct memblock_region *reg;
@@ -850,6 +841,11 @@ void __init setup_arch(char **cmdline_p)
 	init_mm.brk = (unsigned long) &_end;
 
 	parse_early_param();
+#ifdef CONFIG_CRASH_DUMP
+	/* Deactivate elfcorehdr= kernel parameter */
+	elfcorehdr_addr = ELFCORE_ADDR_MAX;
+#endif
+
 	os_info_init();
 	setup_ipl();
 
@@ -858,7 +854,6 @@ void __init setup_arch(char **cmdline_p)
 	reserve_oldmem();
 	reserve_kernel();
 	reserve_initrd();
-	reserve_elfcorehdr();
 	memblock_allow_resize();
 
 	/* Get information about *all* installed memory */
@@ -879,11 +874,13 @@ void __init setup_arch(char **cmdline_p)
 
 	check_initrd();
 	reserve_crashkernel();
+#ifdef CONFIG_CRASH_DUMP
 	/*
 	 * Be aware that smp_save_dump_cpus() triggers a system reset.
 	 * Therefore CPU and device initialization should be done afterwards.
 	 */
 	smp_save_dump_cpus();
+#endif
 
 	setup_resources();
 	setup_vmcoreinfo();

+ 80 - 81
arch/s390/kernel/smp.c

@@ -64,8 +64,9 @@ enum {
 static DEFINE_PER_CPU(struct cpu *, cpu_device);
 
 struct pcpu {
-	struct _lowcore *lowcore;	/* lowcore page(s) for the cpu */
+	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
 	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
+	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
 	signed char state;		/* physical cpu state */
 	signed char polarization;	/* physical polarization */
 	u16 address;			/* physical cpu address */
@@ -80,6 +81,10 @@ EXPORT_SYMBOL(smp_cpu_mt_shift);
 unsigned int smp_cpu_mtid;
 EXPORT_SYMBOL(smp_cpu_mtid);
 
+#ifdef CONFIG_CRASH_DUMP
+__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
+#endif
+
 static unsigned int smp_max_threads __initdata = -1U;
 
 static int __init early_nosmt(char *s)
@@ -105,8 +110,7 @@ DEFINE_MUTEX(smp_cpu_state_mutex);
 /*
  * Signal processor helper functions.
  */
-static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm,
-				    u32 *status)
+static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
 {
 	int cc;
 
@@ -171,6 +175,7 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
 	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
 		return;
 	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
+	pcpu->ec_clk = get_tod_clock_fast();
 	pcpu_sigp_retry(pcpu, order, 0);
 }
 
@@ -180,10 +185,10 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 {
 	unsigned long async_stack, panic_stack;
-	struct _lowcore *lc;
+	struct lowcore *lc;
 
 	if (pcpu != &pcpu_devices[0]) {
-		pcpu->lowcore =	(struct _lowcore *)
+		pcpu->lowcore =	(struct lowcore *)
 			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
 		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
 		panic_stack = __get_free_page(GFP_KERNEL);
@@ -235,7 +240,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
 
 static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 {
-	struct _lowcore *lc = pcpu->lowcore;
+	struct lowcore *lc = pcpu->lowcore;
 
 	if (MACHINE_HAS_TLB_LC)
 		cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
@@ -255,7 +260,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 
 static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
 {
-	struct _lowcore *lc = pcpu->lowcore;
+	struct lowcore *lc = pcpu->lowcore;
 	struct thread_info *ti = task_thread_info(tsk);
 
 	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
@@ -271,7 +276,7 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
 
 static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
 {
-	struct _lowcore *lc = pcpu->lowcore;
+	struct lowcore *lc = pcpu->lowcore;
 
 	lc->restart_stack = lc->kernel_stack;
 	lc->restart_fn = (unsigned long) func;
@@ -286,7 +291,7 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
 static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
 			  void *data, unsigned long stack)
 {
-	struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
+	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
 	unsigned long source_cpu = stap();
 
 	__load_psw_mask(PSW_KERNEL_BITS);
@@ -538,53 +543,24 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
 
 #ifdef CONFIG_CRASH_DUMP
 
-static void __init __smp_store_cpu_state(struct save_area_ext *sa_ext,
-					 u16 address, int is_boot_cpu)
-{
-	void *lc = (void *)(unsigned long) store_prefix();
-	unsigned long vx_sa;
-
-	if (is_boot_cpu) {
-		/* Copy the registers of the boot CPU. */
-		copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
-				 SAVE_AREA_BASE - PAGE_SIZE, 0);
-		if (MACHINE_HAS_VX)
-			save_vx_regs_safe(sa_ext->vx_regs);
-		return;
-	}
-	/* Get the registers of a non-boot cpu. */
-	__pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
-	memcpy_real(&sa_ext->sa, lc + SAVE_AREA_BASE, sizeof(sa_ext->sa));
-	if (!MACHINE_HAS_VX)
-		return;
-	/* Get the VX registers */
-	vx_sa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-	if (!vx_sa)
-		panic("could not allocate memory for VX save area\n");
-	__pcpu_sigp_relax(address, SIGP_STORE_ADDITIONAL_STATUS, vx_sa, NULL);
-	memcpy(sa_ext->vx_regs, (void *) vx_sa, sizeof(sa_ext->vx_regs));
-	memblock_free(vx_sa, PAGE_SIZE);
-}
-
 int smp_store_status(int cpu)
 {
-	unsigned long vx_sa;
-	struct pcpu *pcpu;
+	struct pcpu *pcpu = pcpu_devices + cpu;
+	unsigned long pa;
 
-	pcpu = pcpu_devices + cpu;
-	if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
-			      0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
+	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
+	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
+			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
 		return -EIO;
 	if (!MACHINE_HAS_VX)
 		return 0;
-	vx_sa = __pa(pcpu->lowcore->vector_save_area_addr);
-	__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
-			  vx_sa, NULL);
+	pa = __pa(pcpu->lowcore->vector_save_area_addr);
+	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
+			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
+		return -EIO;
 	return 0;
 }
 
-#endif /* CONFIG_CRASH_DUMP */
-
 /*
  * Collect CPU state of the previous, crashed system.
  * There are four cases:
@@ -593,7 +569,7 @@ int smp_store_status(int cpu)
  *    The state for all CPUs except the boot CPU needs to be collected
  *    with sigp stop-and-store-status. The boot CPU state is located in
  *    the absolute lowcore of the memory stored in the HSA. The zcore code
- *    will allocate the save area and copy the boot CPU state from the HSA.
+ *    will copy the boot CPU state from the HSA.
  * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
  *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
  *    The state for all CPUs except the boot CPU needs to be collected
@@ -608,55 +584,78 @@ int smp_store_status(int cpu)
  *    stored the registers of the boot CPU in the memory of the old system.
  * 4) kdump and the old kernel stored the CPU state
  *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
- *    The state of all CPUs is stored in ELF sections in the memory of the
- *    old system. The ELF sections are picked up by the crash_dump code
- *    via elfcorehdr_addr.
+ *    This case no longer exists for s390; setup_arch explicitly
+ *    deactivates the elfcorehdr= kernel parameter.
  */
+static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
+				     bool is_boot_cpu, unsigned long page)
+{
+	__vector128 *vxrs = (__vector128 *) page;
+
+	if (is_boot_cpu)
+		vxrs = boot_cpu_vector_save_area;
+	else
+		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
+	save_area_add_vxrs(sa, vxrs);
+}
+
+static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
+				     bool is_boot_cpu, unsigned long page)
+{
+	void *regs = (void *) page;
+
+	if (is_boot_cpu)
+		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
+	else
+		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
+	save_area_add_regs(sa, regs);
+}
+
 void __init smp_save_dump_cpus(void)
 {
-#ifdef CONFIG_CRASH_DUMP
-	int addr, cpu, boot_cpu_addr, max_cpu_addr;
-	struct save_area_ext *sa_ext;
+	int addr, boot_cpu_addr, max_cpu_addr;
+	struct save_area *sa;
+	unsigned long page;
 	bool is_boot_cpu;
 
-	if (is_kdump_kernel())
-		/* Previous system stored the CPU states. Nothing to do. */
-		return;
 	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
 		/* No previous system present, normal boot. */
 		return;
+	/* Allocate a page as dumping area for the store status sigps */
+	page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
+	if (!page)
+		panic("could not allocate memory for save area\n");
 	/* Set multi-threading state to the previous system. */
 	pcpu_set_smt(sclp.mtid_prev);
-	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
-	for (cpu = 0, addr = 0; addr <= max_cpu_addr; addr++) {
-		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0, NULL) ==
-		    SIGP_CC_NOT_OPERATIONAL)
-			continue;
-		cpu += 1;
-	}
-	dump_save_areas.areas = (void *)memblock_alloc(sizeof(void *) * cpu, 8);
-	dump_save_areas.count = cpu;
 	boot_cpu_addr = stap();
-	for (cpu = 0, addr = 0; addr <= max_cpu_addr; addr++) {
-		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0, NULL) ==
+	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
+	for (addr = 0; addr <= max_cpu_addr; addr++) {
+		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
 		    SIGP_CC_NOT_OPERATIONAL)
 			continue;
-		sa_ext = (void *) memblock_alloc(sizeof(*sa_ext), 8);
-		dump_save_areas.areas[cpu] = sa_ext;
-		if (!sa_ext)
-			panic("could not allocate memory for save area\n");
 		is_boot_cpu = (addr == boot_cpu_addr);
-		cpu += 1;
-		if (is_boot_cpu && !OLDMEM_BASE)
-			/* Skip boot CPU for standard zfcp dump. */
-			continue;
-		/* Get state for this CPU. */
-		__smp_store_cpu_state(sa_ext, addr, is_boot_cpu);
+		/* Allocate save area */
+		sa = save_area_alloc(is_boot_cpu);
+		if (!sa)
+			panic("could not allocate memory for save area\n");
+		if (MACHINE_HAS_VX)
+			/* Get the vector registers */
+			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
+		/*
+		 * For a zfcp dump, OLDMEM_BASE == NULL and the registers
+		 * of the boot CPU are stored in the HSA. To retrieve
+		 * these registers, an SCLP request is required, which is
+		 * done by drivers/s390/char/zcore.c:init_cpu_info()
+		 */
+		if (!is_boot_cpu || OLDMEM_BASE)
+			/* Get the CPU registers */
+			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
 	}
+	memblock_free(page, PAGE_SIZE);
 	diag308_reset();
 	pcpu_set_smt(0);
-#endif /* CONFIG_CRASH_DUMP */
 }
+#endif /* CONFIG_CRASH_DUMP */
 
 void smp_cpu_set_polarization(int cpu, int val)
 {
@@ -680,7 +679,7 @@ static struct sclp_core_info *smp_get_core_info(void)
 		for (address = 0;
 		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
 		     address += (1U << smp_cpu_mt_shift)) {
-			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
+			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
 			    SIGP_CC_NOT_OPERATIONAL)
 				continue;
 			info->core[info->configured].core_id =
@@ -924,7 +923,7 @@ void __init smp_prepare_boot_cpu(void)
 
 	pcpu->state = CPU_STATE_CONFIGURED;
 	pcpu->address = stap();
-	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
+	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
 	set_cpu_present(0, true);

+ 10 - 10
arch/s390/kernel/sysinfo.c

@@ -111,8 +111,7 @@ static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
 
 static void stsi_15_1_x(struct seq_file *m, struct sysinfo_15_1_x *info)
 {
-	static int max_mnest;
-	int i, rc;
+	int i;
 
 	seq_putc(m, '\n');
 	if (!MACHINE_HAS_TOPOLOGY)
@@ -123,7 +122,7 @@ static void stsi_15_1_x(struct seq_file *m, struct sysinfo_15_1_x *info)
 	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
 		seq_printf(m, " %d", info->mag[i]);
 	seq_putc(m, '\n');
-#ifdef CONFIG_SCHED_MC
+#ifdef CONFIG_SCHED_TOPOLOGY
 	store_topology(info);
 	seq_printf(m, "CPU Topology SW:     ");
 	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
@@ -145,6 +144,10 @@ static void stsi_1_2_2(struct seq_file *m, struct sysinfo_1_2_2 *info)
 	seq_printf(m, "CPUs Configured:      %d\n", info->cpus_configured);
 	seq_printf(m, "CPUs Standby:         %d\n", info->cpus_standby);
 	seq_printf(m, "CPUs Reserved:        %d\n", info->cpus_reserved);
+	if (info->mt_installed) {
+		seq_printf(m, "CPUs G-MTID:          %d\n", info->mt_gtid);
+		seq_printf(m, "CPUs S-MTID:          %d\n", info->mt_stid);
+	}
 	/*
 	 * Sigh 2. According to the specification the alternate
 	 * capability field is a 32 bit floating point number
@@ -194,13 +197,10 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
 	seq_printf(m, "LPAR CPUs Reserved:   %d\n", info->cpus_reserved);
 	seq_printf(m, "LPAR CPUs Dedicated:  %d\n", info->cpus_dedicated);
 	seq_printf(m, "LPAR CPUs Shared:     %d\n", info->cpus_shared);
-	if (info->mt_installed & 0x80) {
-		seq_printf(m, "LPAR CPUs G-MTID:     %d\n",
-			   info->mt_general & 0x1f);
-		seq_printf(m, "LPAR CPUs S-MTID:     %d\n",
-			   info->mt_installed & 0x1f);
-		seq_printf(m, "LPAR CPUs PS-MTID:    %d\n",
-			   info->mt_psmtid & 0x1f);
+	if (info->mt_installed) {
+		seq_printf(m, "LPAR CPUs G-MTID:     %d\n", info->mt_gtid);
+		seq_printf(m, "LPAR CPUs S-MTID:     %d\n", info->mt_stid);
+		seq_printf(m, "LPAR CPUs PS-MTID:    %d\n", info->mt_psmtid);
 	}
 }
 

+ 0 - 3
arch/s390/kernel/traps.c

@@ -260,11 +260,8 @@ void vector_exception(struct pt_regs *regs)
 
 void data_exception(struct pt_regs *regs)
 {
-	__u16 __user *location;
 	int signal = 0;
 
-	location = get_trap_ip(regs);
-
 	save_fpu_regs();
 	if (current->thread.fpu.fpc & FPC_DXC_MASK)
 		signal = SIGFPE;

+ 11 - 6
arch/s390/kernel/vdso.c

@@ -80,7 +80,7 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
 /*
  * Setup vdso data page.
  */
-static void vdso_init_data(struct vdso_data *vd)
+static void __init vdso_init_data(struct vdso_data *vd)
 {
 	vd->ectg_available = test_facility(31);
 }
@@ -90,9 +90,10 @@ static void vdso_init_data(struct vdso_data *vd)
  */
 #define SEGMENT_ORDER	2
 
-int vdso_alloc_per_cpu(struct _lowcore *lowcore)
+int vdso_alloc_per_cpu(struct lowcore *lowcore)
 {
 	unsigned long segment_table, page_table, page_frame;
+	struct vdso_per_cpu_data *vd;
 	u32 *psal, *aste;
 	int i;
 
@@ -107,6 +108,12 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
 	if (!segment_table || !page_table || !page_frame)
 		goto out;
 
+	/* Initialize per-cpu vdso data page */
+	vd = (struct vdso_per_cpu_data *) page_frame;
+	vd->cpu_nr = lowcore->cpu_nr;
+	vd->node_id = cpu_to_node(vd->cpu_nr);
+
+	/* Set up access register mode page table */
 	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
 		    PAGE_SIZE << SEGMENT_ORDER);
 	clear_table((unsigned long *) page_table, _PAGE_INVALID,
@@ -138,7 +145,7 @@ out:
 	return -ENOMEM;
 }
 
-void vdso_free_per_cpu(struct _lowcore *lowcore)
+void vdso_free_per_cpu(struct lowcore *lowcore)
 {
 	unsigned long segment_table, page_table, page_frame;
 	u32 *psal, *aste;
@@ -163,7 +170,7 @@ static void vdso_init_cr5(void)
 
 	if (!vdso_enabled)
 		return;
-	cr5 = offsetof(struct _lowcore, paste);
+	cr5 = offsetof(struct lowcore, paste);
 	__ctl_load(cr5, 5, 5);
 }
 
@@ -299,8 +306,6 @@ static int __init vdso_init(void)
 
 	get_page(virt_to_page(vdso_data));
 
-	smp_mb();
-
 	return 0;
 }
 early_initcall(vdso_init);

+ 1 - 1
arch/s390/kernel/vdso32/Makefile

@@ -1,6 +1,6 @@
 # List of files in the vdso, has to be asm only for now
 
-obj-vdso32 = gettimeofday.o clock_getres.o clock_gettime.o note.o
+obj-vdso32 = gettimeofday.o clock_getres.o clock_gettime.o note.o getcpu.o
 
 # Build rules
 

+ 43 - 0
arch/s390/kernel/vdso32/getcpu.S

@@ -0,0 +1,43 @@
+/*
+ * Userland implementation of getcpu() for 32-bit processes in an
+ * s390 kernel, for use in the vDSO
+ *
+ *  Copyright IBM Corp. 2016
+ *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+#include <asm/vdso.h>
+#include <asm/asm-offsets.h>
+
+	.text
+	.align 4
+	.globl __kernel_getcpu
+	.type  __kernel_getcpu,@function
+__kernel_getcpu:
+	.cfi_startproc
+	ear	%r1,%a4
+	lhi	%r4,1
+	sll	%r4,24
+	sar	%a4,%r4
+	la	%r4,0
+	epsw	%r0,0
+	sacf	512
+	l	%r5,__VDSO_CPU_NR(%r4)
+	l	%r4,__VDSO_NODE_ID(%r4)
+	tml	%r0,0x4000
+	jo	1f
+	tml	%r0,0x8000
+	jno	0f
+	sacf	256
+	j	1f
+0:	sacf	0
+1:	sar	%a4,%r1
+	ltr	%r2,%r2
+	jz	2f
+	st	%r5,0(%r2)
+2:	ltr	%r3,%r3
+	jz	3f
+	st	%r4,0(%r3)
+3:	lhi	%r2,0
+	br	%r14
+	.cfi_endproc
+	.size	__kernel_getcpu,.-__kernel_getcpu

+ 1 - 0
arch/s390/kernel/vdso32/vdso32.lds.S

@@ -132,6 +132,7 @@ VERSION
 		__kernel_gettimeofday;
 		__kernel_clock_gettime;
 		__kernel_clock_getres;
+		__kernel_getcpu;
 
 	local: *;
 	};

+ 1 - 1
arch/s390/kernel/vdso64/Makefile

@@ -1,6 +1,6 @@
 # List of files in the vdso, has to be asm only for now
 
-obj-vdso64 = gettimeofday.o clock_getres.o clock_gettime.o note.o
+obj-vdso64 = gettimeofday.o clock_getres.o clock_gettime.o note.o getcpu.o
 
 # Build rules
 

+ 42 - 0
arch/s390/kernel/vdso64/getcpu.S

@@ -0,0 +1,42 @@
+/*
+ * Userland implementation of getcpu() for 64-bit processes in an
+ * s390 kernel, for use in the vDSO
+ *
+ *  Copyright IBM Corp. 2016
+ *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+#include <asm/vdso.h>
+#include <asm/asm-offsets.h>
+
+	.text
+	.align 4
+	.globl __kernel_getcpu
+	.type  __kernel_getcpu,@function
+__kernel_getcpu:
+	.cfi_startproc
+	ear	%r1,%a4
+	llilh	%r4,0x0100
+	sar	%a4,%r4
+	la	%r4,0
+	epsw	%r0,0
+	sacf	512
+	l	%r5,__VDSO_CPU_NR(%r4)
+	l	%r4,__VDSO_NODE_ID(%r4)
+	tml	%r0,0x4000
+	jo	1f
+	tml	%r0,0x8000
+	jno	0f
+	sacf	256
+	j	1f
+0:	sacf	0
+1:	sar	%a4,%r1
+	ltgr	%r2,%r2
+	jz	2f
+	st	%r5,0(%r2)
+2:	ltgr	%r3,%r3
+	jz	3f
+	st	%r4,0(%r3)
+3:	lghi	%r2,0
+	br	%r14
+	.cfi_endproc
+	.size	__kernel_getcpu,.-__kernel_getcpu

+ 1 - 0
arch/s390/kernel/vdso64/vdso64.lds.S

@@ -132,6 +132,7 @@ VERSION
 		__kernel_gettimeofday;
 		__kernel_clock_gettime;
 		__kernel_clock_getres;
+		__kernel_getcpu;
 
 	local: *;
 	};

+ 2 - 2
arch/s390/kvm/interrupt.c

@@ -499,9 +499,9 @@ static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
 
 	rc  = write_guest_lc(vcpu,
-			     offsetof(struct _lowcore, restart_old_psw),
+			     offsetof(struct lowcore, restart_old_psw),
 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-	rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
+	rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
 	return rc ? -EFAULT : 0;

+ 15 - 15
arch/s390/kvm/kvm-s390.c

@@ -2400,37 +2400,37 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 	u64 clkcomp;
 	int rc;
 
+	px = kvm_s390_get_prefix(vcpu);
 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
 		if (write_guest_abs(vcpu, 163, &archmode, 1))
 			return -EFAULT;
-		gpa = SAVE_AREA_BASE;
+		gpa = 0;
 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
 		if (write_guest_real(vcpu, 163, &archmode, 1))
 			return -EFAULT;
-		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
-	}
-	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
+		gpa = px;
+	} else
+		gpa -= __LC_FPREGS_SAVE_AREA;
+	rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
 			     vcpu->arch.guest_fpregs.fprs, 128);
-	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
+	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
 			      vcpu->run->s.regs.gprs, 128);
-	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
+	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
 			      &vcpu->arch.sie_block->gpsw, 16);
-	px = kvm_s390_get_prefix(vcpu);
-	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
+	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
 			      &px, 4);
-	rc |= write_guest_abs(vcpu,
-			      gpa + offsetof(struct save_area, fp_ctrl_reg),
+	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
 			      &vcpu->arch.guest_fpregs.fpc, 4);
-	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
+	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
 			      &vcpu->arch.sie_block->todpr, 4);
-	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
+	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
 			      &vcpu->arch.sie_block->cputm, 8);
 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
-	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
+	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
 			      &clkcomp, 8);
-	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
+	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
 			      &vcpu->run->s.regs.acrs, 64);
-	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
+	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
 			      &vcpu->arch.sie_block->gcr, 128);
 	return rc ? -EFAULT : 0;
 }

+ 1 - 1
arch/s390/kvm/priv.c

@@ -355,7 +355,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
 	 * into a u32 memory representation. They will remain bits 0-31.
 	 */
 	fac = *vcpu->kvm->arch.model.fac->list >> 32;
-	rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
+	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
 			    &fac, sizeof(fac));
 	if (rc)
 		return rc;

+ 32 - 13
arch/s390/lib/spinlock.c

@@ -37,12 +37,22 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
 	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
 }
 
+static inline int cpu_is_preempted(int cpu)
+{
+	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
+		return 0;
+	if (smp_vcpu_scheduled(cpu))
+		return 0;
+	return 1;
+}
+
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	unsigned int cpu = SPINLOCK_LOCKVAL;
 	unsigned int owner;
-	int count;
+	int count, first_diag;
 
+	first_diag = 1;
 	while (1) {
 		owner = ACCESS_ONCE(lp->lock);
 		/* Try to get the lock if it is free. */
@@ -51,9 +61,10 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 				return;
 			continue;
 		}
-		/* Check if the lock owner is running. */
-		if (!smp_vcpu_scheduled(~owner)) {
+		/* First iteration: check if the lock owner is running. */
+		if (first_diag && cpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
+			first_diag = 0;
 			continue;
 		}
 		/* Loop for a while on the lock value. */
@@ -67,10 +78,13 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 			continue;
 		/*
 		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
-		 * yield the CPU if the lock is still unavailable.
+		 * yield the CPU unconditionally. For LPAR, rely on the
+		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR)
+		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
+			first_diag = 0;
+		}
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait);
@@ -79,9 +93,10 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
 	unsigned int cpu = SPINLOCK_LOCKVAL;
 	unsigned int owner;
-	int count;
+	int count, first_diag;
 
 	local_irq_restore(flags);
+	first_diag = 1;
 	while (1) {
 		owner = ACCESS_ONCE(lp->lock);
 		/* Try to get the lock if it is free. */
@@ -92,8 +107,9 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 			local_irq_restore(flags);
 		}
 		/* Check if the lock owner is running. */
-		if (!smp_vcpu_scheduled(~owner)) {
+		if (first_diag && cpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
+			first_diag = 0;
 			continue;
 		}
 		/* Loop for a while on the lock value. */
@@ -107,10 +123,13 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 			continue;
 		/*
 		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
-		 * yield the CPU if the lock is still unavailable.
+		 * yield the CPU unconditionally. For LPAR, rely on the
+		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR)
+		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
+			first_diag = 0;
+		}
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
@@ -145,7 +164,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && !smp_vcpu_scheduled(~owner))
+			if (owner && cpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -191,7 +210,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && !smp_vcpu_scheduled(~owner))
+			if (owner && cpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -221,7 +240,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && !smp_vcpu_scheduled(~owner))
+			if (owner && cpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -265,7 +284,7 @@ void arch_lock_relax(unsigned int cpu)
 {
 	if (!cpu)
 		return;
-	if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
+	if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
 		return;
 	smp_yield_cpu(~cpu);
 }

+ 6 - 2
arch/s390/mm/extable.c

@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start,
 	int i;
 
 	/* Normalize entries to being relative to the start of the section */
-	for (p = start, i = 0; p < finish; p++, i += 8)
+	for (p = start, i = 0; p < finish; p++, i += 8) {
 		p->insn += i;
+		p->fixup += i + 4;
+	}
 	sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
 	/* Denormalize all entries */
-	for (p = start, i = 0; p < finish; p++, i += 8)
+	for (p = start, i = 0; p < finish; p++, i += 8) {
 		p->insn -= i;
+		p->fixup -= i + 4;
+	}
 }
 
 #ifdef CONFIG_MODULES
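
The normalization fix above follows from the entry layout: both fields are 32-bit offsets relative to their own address, with insn at byte 0 and fixup at byte 4 of each 8-byte entry, so converting them to be relative to the start of the table has to add the field's own position. A sketch of the layout being assumed (ours):

struct exception_table_entry {
	int insn;	/* at entry + 0: target address = &insn  + insn  */
	int fixup;	/* at entry + 4: target address = &fixup + fixup */
};

/*
 * With entry number n starting at byte offset i = 8 * n:
 *   insn  relative to the table start = insn  + i
 *   fixup relative to the table start = fixup + i + 4
 * which is exactly what the loops above add before sorting and
 * subtract again afterwards.
 */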

+ 1 - 3
arch/s390/mm/extmem.c

@@ -94,7 +94,7 @@ static DEFINE_MUTEX(dcss_lock);
 static LIST_HEAD(dcss_list);
 static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
 					"EW/EN-MIXED" };
-static int loadshr_scode, loadnsr_scode, findseg_scode;
+static int loadshr_scode, loadnsr_scode;
 static int segext_scode, purgeseg_scode;
 static int scode_set;
 
@@ -130,7 +130,6 @@ dcss_set_subcodes(void)
 		loadshr_scode = DCSS_LOADSHRX;
 		loadnsr_scode = DCSS_LOADNSRX;
 		purgeseg_scode = DCSS_PURGESEG;
-		findseg_scode = DCSS_FINDSEGX;
 		segext_scode = DCSS_SEGEXTX;
 		return 0;
 	}
@@ -138,7 +137,6 @@ dcss_set_subcodes(void)
 	loadshr_scode = DCSS_LOADNOLY;
 	loadnsr_scode = DCSS_LOADNSR;
 	purgeseg_scode = DCSS_PURGESEG;
-	findseg_scode = DCSS_FINDSEG;
 	segext_scode = DCSS_SEGEXT;
 	return 0;
 }

+ 0 - 2
arch/s390/mm/fault.c

@@ -254,7 +254,6 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
 static noinline void do_no_context(struct pt_regs *regs)
 {
 	const struct exception_table_entry *fixup;
-	unsigned long address;
 
 	/* Are we prepared to handle this kernel fault?  */
 	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
@@ -267,7 +266,6 @@ static noinline void do_no_context(struct pt_regs *regs)
 	 * Oops. The kernel tried to access some bad page. We'll have to
 	 * terminate things with extreme prejudice.
 	 */
-	address = regs->int_parm_long & __FAIL_ADDR_MASK;
 	if (!user_space_fault(regs))
 		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
 		       " in virtual kernel address space\n");

+ 1 - 0
arch/s390/mm/gup.c

@@ -233,6 +233,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	struct mm_struct *mm = current->mm;
 	int nr, ret;
 
+	might_sleep();
 	start &= PAGE_MASK;
 	nr = __get_user_pages_fast(start, nr_pages, write, pages);
 	if (nr == nr_pages)

+ 2 - 2
arch/s390/mm/maccess.c

@@ -163,11 +163,11 @@ static int is_swapped(unsigned long addr)
 	unsigned long lc;
 	int cpu;
 
-	if (addr < sizeof(struct _lowcore))
+	if (addr < sizeof(struct lowcore))
 		return 1;
 	for_each_online_cpu(cpu) {
 		lc = (unsigned long) lowcore_ptr[cpu];
-		if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
+		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
 			continue;
 		return 1;
 	}

+ 2 - 5
arch/s390/mm/mem_detect.c

@@ -14,8 +14,6 @@
 #include <asm/sclp.h>
 #include <asm/setup.h>
 
-#define ADDR2G (1ULL << 31)
-
 #define CHUNK_READ_WRITE 0
 #define CHUNK_READ_ONLY  1
 
@@ -27,15 +25,14 @@ static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size)
 
 void __init detect_memory_memblock(void)
 {
-	unsigned long long memsize, rnmax, rzm;
-	unsigned long addr, size;
+	unsigned long memsize, rnmax, rzm, addr, size;
 	int type;
 
 	rzm = sclp.rzm;
 	rnmax = sclp.rnmax;
 	memsize = rzm * rnmax;
 	if (!rzm)
-		rzm = 1ULL << 17;
+		rzm = 1UL << 17;
 	max_physmem_end = memsize;
 	addr = 0;
 	/* keep memblock lists close to the kernel */

+ 1 - 2
arch/s390/pci/pci.c

@@ -701,8 +701,7 @@ static int zpci_restore(struct device *dev)
 		goto out;
 
 	zpci_map_resources(pdev);
-	zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
-			   zdev->start_dma + zdev->iommu_size - 1,
+	zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
 			   (u64) zdev->dma_table);
 
 out:

+ 14 - 5
arch/s390/pci/pci_dma.c

@@ -457,7 +457,19 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
 		goto out_clean;
 	}
 
-	zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
+	/*
+	 * Restrict the iommu bitmap size to the minimum of the following:
+	 * - main memory size
+	 * - 3-level pagetable address limit minus start_dma offset
+	 * - DMA address range allowed by the hardware (clp query pci fn)
+	 *
+	 * Also set zdev->end_dma to the actual end address of the usable
+	 * range, instead of the theoretical maximum as reported by hardware.
+	 */
+	zdev->iommu_size = min3((u64) high_memory,
+				ZPCI_TABLE_SIZE_RT - zdev->start_dma,
+				zdev->end_dma - zdev->start_dma + 1);
+	zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
 	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
 	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
 	if (!zdev->iommu_bitmap) {
@@ -465,10 +477,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
 		goto out_reg;
 	}
 
-	rc = zpci_register_ioat(zdev,
-				0,
-				zdev->start_dma + PAGE_OFFSET,
-				zdev->start_dma + zdev->iommu_size - 1,
+	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
 				(u64) zdev->dma_table);
 	if (rc)
 		goto out_reg;
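
As a rough illustration of the sizing logic described in the comment above, here is a self-contained userspace sketch. All values (the 8 GiB memory size, the table limit standing in for ZPCI_TABLE_SIZE_RT, start_dma and the hardware-reported end_dma) are invented for the example and are not taken from the kernel:

#include <inttypes.h>
#include <stdio.h>

static uint64_t min3_u64(uint64_t a, uint64_t b, uint64_t c)
{
	uint64_t m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	uint64_t high_memory = 8ULL << 30;            /* pretend 8 GiB of main memory */
	uint64_t table_limit = 1ULL << 42;            /* stand-in for the 3-level table limit */
	uint64_t start_dma   = 0x100000000ULL;        /* start of the device's DMA window */
	uint64_t hw_end_dma  = 0xffffffffffffULL;     /* end as reported by the hardware */

	uint64_t iommu_size = min3_u64(high_memory,
				       table_limit - start_dma,
				       hw_end_dma - start_dma + 1);
	uint64_t end_dma = start_dma + iommu_size - 1;

	printf("iommu_size = 0x%" PRIx64 "\n", iommu_size);  /* here: 0x200000000 (8 GiB) */
	printf("end_dma    = 0x%" PRIx64 "\n", end_dma);
	return 0;
}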

+ 1 - 0
arch/s390/tools/.gitignore

@@ -0,0 +1 @@
+gen_facilities

+ 15 - 0
arch/s390/tools/Makefile

@@ -0,0 +1,15 @@
+#
+# Makefile for s390 specific build tools
+#
+
+hostprogs-y		    += gen_facilities
+HOSTCFLAGS_gen_facilities.o += -Wall $(LINUXINCLUDE)
+
+define filechk_facilities.h
+	$(obj)/gen_facilities
+endef
+
+$(obj)/gen_facilities.o: $(srctree)/arch/s390/tools/gen_facilities.c
+
+include/generated/facilities.h: $(obj)/gen_facilities FORCE
+	$(call filechk,facilities.h)

+ 67 - 0
arch/s390/tools/gen_facilities.c

@@ -0,0 +1,67 @@
+/*
+ * Simple program to generate defines out of facility lists that use the bit
+ * numbering scheme from the Principles of Operation: most significant bit
+ * has bit number 0.
+ *
+ *    Copyright IBM Corp. 2015
+ *
+ */
+
+#define S390_GEN_FACILITIES_C
+
+#include <strings.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <asm/facilities_src.h>
+
+static void print_facility_list(struct facility_def *def)
+{
+	unsigned int high, bit, dword, i;
+	unsigned long long *array;
+
+	array = calloc(1, 8);
+	if (!array)
+		exit(EXIT_FAILURE);
+	high = 0;
+	for (i = 0; def->bits[i] != -1; i++) {
+		bit = 63 - (def->bits[i] & 63);
+		dword = def->bits[i] / 64;
+		if (dword > high) {
+			array = realloc(array, (dword + 1) * 8);
+			if (!array)
+				exit(EXIT_FAILURE);
+			memset(array + high + 1, 0, (dword - high) * 8);
+			high = dword;
+		}
+		array[dword] |= 1ULL << bit;
+	}
+	printf("#define %s ", def->name);
+	for (i = 0; i <= high; i++)
+		printf("_AC(0x%016llx,UL)%c", array[i], i < high ? ',' : '\n');
+	printf("#define %s_DWORDS %d\n", def->name, high + 1);
+	free(array);
+}
+
+static void print_facility_lists(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < sizeof(facility_defs) / sizeof(facility_defs[0]); i++)
+		print_facility_list(&facility_defs[i]);
+}
+
+int main(int argc, char **argv)
+{
+	printf("#ifndef __ASM_S390_FACILITIES__\n");
+	printf("#define __ASM_S390_FACILITIES__\n");
+	printf("/*\n");
+	printf(" * DO NOT MODIFY.\n");
+	printf(" *\n");
+	printf(" * This file was generated by %s\n", __FILE__);
+	printf(" */\n\n");
+	printf("#include <linux/const.h>\n\n");
+	print_facility_lists();
+	printf("\n#endif\n");
+	return 0;
+}
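
For context, each generated facility list is just an array of 64-bit words in which facility number n (counted from the most significant bit, as the comment above says) sits in word n / 64 at position 63 - (n % 64). A small userspace sketch of that lookup, using a hand-written two-word mask with bits 0, 23 and 76 set rather than the real generated header; the facility_set() helper only illustrates the numbering, it is not the kernel's test_facility():

#include <stdio.h>

/* two 64-bit words with facility bits 0, 23 and 76 set (MSB-first numbering) */
static const unsigned long long fac_list[2] = {
	(1ULL << 63) | (1ULL << (63 - 23)),
	1ULL << (63 - (76 & 63)),
};

static int facility_set(const unsigned long long *list, unsigned int nr)
{
	return (list[nr / 64] >> (63 - (nr & 63))) & 1;
}

int main(void)
{
	unsigned int bits[] = { 0, 23, 42, 76 };
	unsigned int i;

	for (i = 0; i < sizeof(bits) / sizeof(bits[0]); i++)
		printf("facility %3u: %d\n", bits[i], facility_set(fac_list, bits[i]));
	return 0;
}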

+ 6 - 2
drivers/s390/block/dasd.c

@@ -2556,8 +2556,12 @@ static void __dasd_process_request_queue(struct dasd_block *block)
 		return;
 	}
 
-	/* if device ist stopped do not fetch new requests */
-	if (basedev->stopped)
+	/*
+	 * if device is stopped do not fetch new requests
+	 * except failfast is active which will let requests fail
+	 * immediately in __dasd_block_start_head()
+	 */
+	if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST))
 		return;
 
 	/* Now we try to fetch requests from the request queue */

+ 8 - 13
drivers/s390/char/Kconfig

@@ -78,19 +78,6 @@ config SCLP_VT220_CONSOLE
 	  Include support for using an IBM SCLP VT220-compatible terminal as a
 	  Linux system console.
 
-config SCLP_CPI
-	def_tristate m
-	prompt "Control-Program Identification"
-	depends on S390
-	help
-	  This option enables the hardware console interface for system
-	  identification. This is commonly used for workload management and
-	  gives you a nice name for the system on the service element.
-	  Please select this option as a module since built-in operation is
-	  completely untested.
-	  You should only select this option if you know what you are doing,
-	  need this feature and intend to run your kernel in LPAR.
-
 config SCLP_ASYNC
 	def_tristate m
 	prompt "Support for Call Home via Asynchronous SCLP Records"
@@ -125,6 +112,14 @@ config HMC_DRV
 	  transfer cache size from it's default value 0.5MB to N bytes. If N
 	  is zero, then no caching is performed.
 
+config SCLP_OFB
+	def_bool n
+	prompt "Support for Open-for-Business SCLP Event"
+	depends on S390
+	help
+	  This option enables the Open-for-Business interface to the s390
+	  Service Element.
+
 config S390_TAPE
 	def_tristate m
 	prompt "S/390 tape device support"

+ 1 - 4
drivers/s390/char/Makefile

@@ -16,7 +16,6 @@ obj-$(CONFIG_TN3215) += con3215.o
 obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
 obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
 obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
-obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o
 obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
 
 obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
@@ -30,9 +29,7 @@ obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
 obj-$(CONFIG_MONREADER) += monreader.o
 obj-$(CONFIG_MONWRITER) += monwriter.o
 obj-$(CONFIG_S390_VMUR) += vmur.o
-
-zcore_mod-objs := sclp_sdias.o zcore.o
-obj-$(CONFIG_CRASH_DUMP) += zcore_mod.o
+obj-$(CONFIG_CRASH_DUMP) += sclp_sdias.o zcore.o
 
 hmcdrv-objs := hmcdrv_mod.o hmcdrv_dev.o hmcdrv_ftp.o hmcdrv_cache.o diag_ftp.o sclp_ftp.o
 obj-$(CONFIG_HMC_DRV) += hmcdrv.o

+ 2 - 0
drivers/s390/char/con3215.c

@@ -922,6 +922,8 @@ static int __init con3215_init(void)
 	spin_lock_init(&raw3215_freelist_lock);
 	for (i = 0; i < NR_3215_REQ; i++) {
 		req = kzalloc(sizeof(struct raw3215_req), GFP_KERNEL | GFP_DMA);
+		if (!req)
+			return -ENOMEM;
 		req->next = raw3215_freelist;
 		raw3215_freelist = req;
 	}

+ 2 - 0
drivers/s390/char/con3270.c

@@ -606,6 +606,8 @@ con3270_init(void)
 		return PTR_ERR(rp);
 
 	condev = kzalloc(sizeof(struct con3270), GFP_KERNEL | GFP_DMA);
+	if (!condev)
+		return -ENOMEM;
 	condev->view.dev = rp;
 
 	condev->read = raw3270_request_alloc(0);

+ 3 - 3
drivers/s390/char/hmcdrv_ftp.c

@@ -37,7 +37,7 @@ struct hmcdrv_ftp_ops {
 static enum hmcdrv_ftp_cmdid hmcdrv_ftp_cmd_getid(const char *cmd, int len);
 static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp);
 
-static struct hmcdrv_ftp_ops *hmcdrv_ftp_funcs; /* current operations */
+static const struct hmcdrv_ftp_ops *hmcdrv_ftp_funcs; /* current operations */
 static DEFINE_MUTEX(hmcdrv_ftp_mutex); /* mutex for hmcdrv_ftp_funcs */
 static unsigned hmcdrv_ftp_refcnt; /* start/shutdown reference counter */
 
@@ -290,13 +290,13 @@ ssize_t hmcdrv_ftp_cmd(char __kernel *cmd, loff_t offset,
  */
 int hmcdrv_ftp_startup(void)
 {
-	static struct hmcdrv_ftp_ops hmcdrv_ftp_zvm = {
+	static const struct hmcdrv_ftp_ops hmcdrv_ftp_zvm = {
 		.startup = diag_ftp_startup,
 		.shutdown = diag_ftp_shutdown,
 		.transfer = diag_ftp_cmd
 	};
 
-	static struct hmcdrv_ftp_ops hmcdrv_ftp_lpar = {
+	static const struct hmcdrv_ftp_ops hmcdrv_ftp_lpar = {
 		.startup = sclp_ftp_startup,
 		.shutdown = sclp_ftp_shutdown,
 		.transfer = sclp_ftp_cmd
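
Constifying the ops tables is the usual pattern for function-pointer structures that are fully initialized at compile time: the compiler can place them in read-only data, and accidental writes become errors. A generic sketch of the pattern; struct ftp_ops and the demo callbacks below are illustrative and not part of the hmcdrv code:

#include <stdio.h>

struct ftp_ops {
	int  (*startup)(void);
	void (*shutdown)(void);
};

static int demo_startup(void)   { puts("startup");  return 0; }
static void demo_shutdown(void) { puts("shutdown"); }

/* const: the table can live in .rodata and is never modified at run time */
static const struct ftp_ops demo_ops = {
	.startup  = demo_startup,
	.shutdown = demo_shutdown,
};

static const struct ftp_ops *current_ops;   /* pointer to const, as in the driver */

int main(void)
{
	current_ops = &demo_ops;
	current_ops->startup();
	current_ops->shutdown();
	return 0;
}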

+ 2 - 3
drivers/s390/char/sclp.c

@@ -579,9 +579,8 @@ sclp_sync_wait(void)
 	old_tick = local_tick_disable();
 	trace_hardirqs_on();
 	__ctl_store(cr0, 0, 0);
-	cr0_sync = cr0;
-	cr0_sync &= 0xffff00a0;
-	cr0_sync |= 0x00000200;
+	cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
+	cr0_sync |= 1UL << (63 - 54);
 	__ctl_load(cr0_sync, 0, 0);
 	__arch_local_irq_stosm(0x01);
 	/* Loop until driver state indicates finished request */
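
The new expression is easier to check against the old magic number once you recall that s390 control-register bits are numbered from the most significant bit, so bit 54 of the 64-bit register is 1UL << (63 - 54). A one-line sanity check; the 0x200 value comes from the literal the old code OR-ed in, and the value of CR0_IRQ_SUBCLASS_MASK itself is deliberately not reproduced here:

#include <stdio.h>

int main(void)
{
	unsigned long bit54 = 1UL << (63 - 54);  /* MSB-first bit 54 */

	/* prints 0x200, matching the 0x00000200 literal removed above */
	printf("0x%lx\n", bit54);
	return 0;
}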

+ 101 - 1
drivers/s390/char/sclp_config.c

@@ -11,6 +11,8 @@
 #include <linux/cpu.h>
 #include <linux/device.h>
 #include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
 #include <asm/smp.h>
 
 #include "sclp.h"
@@ -20,8 +22,22 @@ struct conf_mgm_data {
 	u8 ev_qualifier;
 } __attribute__((packed));
 
+#define OFB_DATA_MAX 64
+
+struct sclp_ofb_evbuf {
+	struct evbuf_header header;
+	struct conf_mgm_data cm_data;
+	char ev_data[OFB_DATA_MAX];
+} __packed;
+
+struct sclp_ofb_sccb {
+	struct sccb_header header;
+	struct sclp_ofb_evbuf ofb_evbuf;
+} __packed;
+
 #define EV_QUAL_CPU_CHANGE	1
 #define EV_QUAL_CAP_CHANGE	3
+#define EV_QUAL_OPEN4BUSINESS	5
 
 static struct work_struct sclp_cpu_capability_work;
 static struct work_struct sclp_cpu_change_work;
@@ -63,15 +79,99 @@ static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
 
 static struct sclp_register sclp_conf_register =
 {
+#ifdef CONFIG_SCLP_OFB
+	.send_mask    = EVTYP_CONFMGMDATA_MASK,
+#endif
 	.receive_mask = EVTYP_CONFMGMDATA_MASK,
 	.receiver_fn  = sclp_conf_receiver_fn,
 };
 
+#ifdef CONFIG_SCLP_OFB
+static int sclp_ofb_send_req(char *ev_data, size_t len)
+{
+	static DEFINE_MUTEX(send_mutex);
+	struct sclp_ofb_sccb *sccb;
+	int rc, response;
+
+	if (len > OFB_DATA_MAX)
+		return -EINVAL;
+	sccb = (struct sclp_ofb_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	/* Setup SCCB for Control-Program Identification */
+	sccb->header.length = sizeof(struct sclp_ofb_sccb);
+	sccb->ofb_evbuf.header.length = sizeof(struct sclp_ofb_evbuf);
+	sccb->ofb_evbuf.header.type = EVTYP_CONFMGMDATA;
+	sccb->ofb_evbuf.cm_data.ev_qualifier = EV_QUAL_OPEN4BUSINESS;
+	memcpy(sccb->ofb_evbuf.ev_data, ev_data, len);
+
+	if (!(sclp_conf_register.sclp_receive_mask & EVTYP_CONFMGMDATA_MASK))
+		pr_warn("SCLP receiver did not register to receive "
+			"Configuration Management Data Events.\n");
+
+	mutex_lock(&send_mutex);
+	rc = sclp_sync_request(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
+	mutex_unlock(&send_mutex);
+	if (rc)
+		goto out;
+	response = sccb->header.response_code;
+	if (response != 0x0020) {
+		pr_err("Open for Business request failed with response code "
+		       "0x%04x\n", response);
+		rc = -EIO;
+	}
+out:
+	free_page((unsigned long)sccb);
+	return rc;
+}
+
+static ssize_t sysfs_ofb_data_write(struct file *filp, struct kobject *kobj,
+				    struct bin_attribute *bin_attr,
+				    char *buf, loff_t off, size_t count)
+{
+	int rc;
+
+	rc = sclp_ofb_send_req(buf, count);
+	return rc ?: count;
+}
+
+static struct bin_attribute ofb_bin_attr = {
+	.attr = {
+		.name = "event_data",
+		.mode = S_IWUSR,
+	},
+	.write = sysfs_ofb_data_write,
+};
+#endif
+
+static int __init sclp_ofb_setup(void)
+{
+#ifdef CONFIG_SCLP_OFB
+	struct kset *ofb_kset;
+	int rc;
+
+	ofb_kset = kset_create_and_add("ofb", NULL, firmware_kobj);
+	if (!ofb_kset)
+		return -ENOMEM;
+	rc = sysfs_create_bin_file(&ofb_kset->kobj, &ofb_bin_attr);
+	if (rc) {
+		kset_unregister(ofb_kset);
+		return rc;
+	}
+#endif
+	return 0;
+}
+
 static int __init sclp_conf_init(void)
 {
+	int rc;
+
 	INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
 	INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
-	return sclp_register(&sclp_conf_register);
+	rc = sclp_register(&sclp_conf_register);
+	if (rc)
+		return rc;
+	return sclp_ofb_setup();
 }
 
 __initcall(sclp_conf_init);
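
With CONFIG_SCLP_OFB enabled, the event is triggered by writing at most OFB_DATA_MAX (64) bytes of event data to the new binary sysfs attribute. A hedged usage sketch: the path /sys/firmware/ofb/event_data is inferred from the "ofb" kset under firmware_kobj and the "event_data" attribute name above, and the payload string is purely an example:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/firmware/ofb/event_data";  /* assumed location */
	const char msg[] = "example event data";            /* <= 64 bytes */
	int fd;

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write");
	close(fd);
	return 0;
}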

+ 0 - 40
drivers/s390/char/sclp_cpi.c

@@ -1,40 +0,0 @@
-/*
- *    SCLP control programm identification
- *
- *    Copyright IBM Corp. 2001, 2007
- *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
- *		 Michael Ernst <mernst@de.ibm.com>
- */
-
-#include <linux/kmod.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/version.h>
-#include "sclp_cpi_sys.h"
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Identify this operating system instance "
-		   "to the System z hardware");
-MODULE_AUTHOR("Martin Peschke <mpeschke@de.ibm.com>, "
-	      "Michael Ernst <mernst@de.ibm.com>");
-
-static char *system_name = "";
-static char *sysplex_name = "";
-
-module_param(system_name, charp, 0);
-MODULE_PARM_DESC(system_name, "e.g. hostname - max. 8 characters");
-module_param(sysplex_name, charp, 0);
-MODULE_PARM_DESC(sysplex_name, "if applicable - max. 8 characters");
-
-static int __init cpi_module_init(void)
-{
-	return sclp_cpi_set_data(system_name, sysplex_name, "LINUX",
-				 LINUX_VERSION_CODE);
-}
-
-static void __exit cpi_module_exit(void)
-{
-}
-
-module_init(cpi_module_init);
-module_exit(cpi_module_exit);

+ 48 - 402
drivers/s390/char/zcore.c

@@ -28,13 +28,12 @@
 #include <asm/processor.h>
 #include <asm/irqflags.h>
 #include <asm/checksum.h>
+#include <asm/os_info.h>
 #include <asm/switch_to.h>
 #include "sclp.h"
 
 #define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
 
-#define TO_USER		1
-#define TO_KERNEL	0
 #define CHUNK_INFO_SIZE	34 /* 2 16-byte char, each followed by blank */
 
 enum arch_id {
@@ -42,241 +41,93 @@ enum arch_id {
 	ARCH_S390X	= 1,
 };
 
-/* dump system info */
-
-struct sys_info {
-	enum arch_id	 arch;
-	unsigned long	 sa_base;
-	u32		 sa_size;
-	int		 cpu_map[NR_CPUS];
-	unsigned long	 mem_size;
-	struct save_area lc_mask;
-};
-
 struct ipib_info {
 	unsigned long	ipib;
 	u32		checksum;
 }  __attribute__((packed));
 
-static struct sys_info sys_info;
 static struct debug_info *zcore_dbf;
 static int hsa_available;
 static struct dentry *zcore_dir;
-static struct dentry *zcore_file;
 static struct dentry *zcore_memmap_file;
 static struct dentry *zcore_reipl_file;
 static struct dentry *zcore_hsa_file;
 static struct ipl_parameter_block *ipl_block;
 
+static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
+
 /*
- * Copy memory from HSA to kernel or user memory (not reentrant):
+ * Copy memory from HSA to user memory (not reentrant):
  *
- * @dest:  Kernel or user buffer where memory should be copied to
+ * @dest:  User buffer where memory should be copied to
  * @src:   Start address within HSA where data should be copied
  * @count: Size of buffer, which should be copied
- * @mode:  Either TO_KERNEL or TO_USER
  */
-int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
+int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
 {
-	int offs, blk_num;
-	static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+	unsigned long offset, bytes;
 
 	if (!hsa_available)
 		return -ENODATA;
-	if (count == 0)
-		return 0;
 
-	/* copy first block */
-	offs = 0;
-	if ((src % PAGE_SIZE) != 0) {
-		blk_num = src / PAGE_SIZE + 2;
-		if (sclp_sdias_copy(buf, blk_num, 1)) {
+	while (count) {
+		if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
 			TRACE("sclp_sdias_copy() failed\n");
 			return -EIO;
 		}
-		offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count);
-		if (mode == TO_USER) {
-			if (copy_to_user((__force __user void*) dest,
-					 buf + (src % PAGE_SIZE), offs))
-				return -EFAULT;
-		} else
-			memcpy(dest, buf + (src % PAGE_SIZE), offs);
-	}
-	if (offs == count)
-		goto out;
-
-	/* copy middle */
-	for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) {
-		blk_num = (src + offs) / PAGE_SIZE + 2;
-		if (sclp_sdias_copy(buf, blk_num, 1)) {
-			TRACE("sclp_sdias_copy() failed\n");
-			return -EIO;
-		}
-		if (mode == TO_USER) {
-			if (copy_to_user((__force __user void*) dest + offs,
-					 buf, PAGE_SIZE))
-				return -EFAULT;
-		} else
-			memcpy(dest + offs, buf, PAGE_SIZE);
-	}
-	if (offs == count)
-		goto out;
-
-	/* copy last block */
-	blk_num = (src + offs) / PAGE_SIZE + 2;
-	if (sclp_sdias_copy(buf, blk_num, 1)) {
-		TRACE("sclp_sdias_copy() failed\n");
-		return -EIO;
-	}
-	if (mode == TO_USER) {
-		if (copy_to_user((__force __user void*) dest + offs, buf,
-				 count - offs))
+		offset = src % PAGE_SIZE;
+		bytes = min(PAGE_SIZE - offset, count);
+		if (copy_to_user(dest, hsa_buf + offset, bytes))
 			return -EFAULT;
-	} else
-		memcpy(dest + offs, buf, count - offs);
-out:
-	return 0;
-}
-
-static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
-{
-	return memcpy_hsa((void __force *) dest, src, count, TO_USER);
-}
-
-static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
-{
-	return memcpy_hsa(dest, src, count, TO_KERNEL);
-}
-
-static int __init init_cpu_info(enum arch_id arch)
-{
-	struct save_area_ext *sa_ext;
-
-	/* get info for boot cpu from lowcore, stored in the HSA */
-
-	sa_ext = dump_save_areas.areas[0];
-	if (!sa_ext)
-		return -ENOMEM;
-	if (memcpy_hsa_kernel(&sa_ext->sa, sys_info.sa_base,
-			      sys_info.sa_size) < 0) {
-		TRACE("could not copy from HSA\n");
-		kfree(sa_ext);
-		return -EIO;
+		src += bytes;
+		dest += bytes;
+		count -= bytes;
 	}
-	if (MACHINE_HAS_VX)
-		save_vx_regs_safe(sa_ext->vx_regs);
 	return 0;
 }
 
-static DEFINE_MUTEX(zcore_mutex);
-
-#define DUMP_VERSION	0x5
-#define DUMP_MAGIC	0xa8190173618f23fdULL
-#define DUMP_ARCH_S390X	2
-#define DUMP_ARCH_S390	1
-#define HEADER_SIZE	4096
-
-/* dump header dumped according to s390 crash dump format */
-
-struct zcore_header {
-	u64 magic;
-	u32 version;
-	u32 header_size;
-	u32 dump_level;
-	u32 page_size;
-	u64 mem_size;
-	u64 mem_start;
-	u64 mem_end;
-	u32 num_pages;
-	u32 pad1;
-	u64 tod;
-	struct cpuid cpu_id;
-	u32 arch_id;
-	u32 volnr;
-	u32 build_arch;
-	u64 rmem_size;
-	u8 mvdump;
-	u16 cpu_cnt;
-	u16 real_cpu_cnt;
-	u8 end_pad1[0x200-0x061];
-	u64 mvdump_sign;
-	u64 mvdump_zipl_time;
-	u8 end_pad2[0x800-0x210];
-	u32 lc_vec[512];
-} __attribute__((packed,__aligned__(16)));
-
-static struct zcore_header zcore_header = {
-	.magic		= DUMP_MAGIC,
-	.version	= DUMP_VERSION,
-	.header_size	= 4096,
-	.dump_level	= 0,
-	.page_size	= PAGE_SIZE,
-	.mem_start	= 0,
-	.build_arch	= DUMP_ARCH_S390X,
-};
-
 /*
- * Copy lowcore info to buffer. Use map in order to copy only register parts.
+ * Copy memory from HSA to kernel memory (not reentrant):
  *
- * @buf:    User buffer
- * @sa:     Pointer to save area
- * @sa_off: Offset in save area to copy
- * @len:    Number of bytes to copy
+ * @dest:  Kernel or user buffer where memory should be copied to
+ * @src:   Start address within HSA where data should be copied
+ * @count: Size of buffer, which should be copied
  */
-static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
+int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
 {
-	int i;
-	char *lc_mask = (char*)&sys_info.lc_mask;
+	unsigned long offset, bytes;
 
-	for (i = 0; i < len; i++) {
-		if (!lc_mask[i + sa_off])
-			continue;
-		if (copy_to_user(buf + i, sa + sa_off + i, 1))
-			return -EFAULT;
+	if (!hsa_available)
+		return -ENODATA;
+
+	while (count) {
+		if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
+			TRACE("sclp_sdias_copy() failed\n");
+			return -EIO;
+		}
+		offset = src % PAGE_SIZE;
+		bytes = min(PAGE_SIZE - offset, count);
+		memcpy(dest, hsa_buf + offset, bytes);
+		src += bytes;
+		dest += bytes;
+		count -= bytes;
 	}
 	return 0;
 }
 
-/*
- * Copy lowcores info to memory, if necessary
- *
- * @buf:   User buffer
- * @addr:  Start address of buffer in dump memory
- * @count: Size of buffer
- */
-static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
+static int __init init_cpu_info(void)
 {
-	unsigned long end;
-	int i;
-
-	if (count == 0)
-		return 0;
+	struct save_area *sa;
 
-	end = start + count;
-	for (i = 0; i < dump_save_areas.count; i++) {
-		unsigned long cp_start, cp_end; /* copy range */
-		unsigned long sa_start, sa_end; /* save area range */
-		unsigned long prefix;
-		unsigned long sa_off, len, buf_off;
-		struct save_area *save_area = &dump_save_areas.areas[i]->sa;
-
-		prefix = save_area->pref_reg;
-		sa_start = prefix + sys_info.sa_base;
-		sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
-
-		if ((end < sa_start) || (start > sa_end))
-			continue;
-		cp_start = max(start, sa_start);
-		cp_end = min(end, sa_end);
-
-		buf_off = cp_start - start;
-		sa_off = cp_start - sa_start;
-		len = cp_end - cp_start;
-
-		TRACE("copy_lc for: %lx\n", start);
-		if (copy_lc(buf + buf_off, save_area, sa_off, len))
-			return -EFAULT;
+	/* get info for boot cpu from lowcore, stored in the HSA */
+	sa = save_area_boot_cpu();
+	if (!sa)
+		return -ENOMEM;
+	if (memcpy_hsa_kernel(hsa_buf, __LC_FPREGS_SAVE_AREA, 512) < 0) {
+		TRACE("could not copy from HSA\n");
+		return -EIO;
 	}
+	save_area_add_regs(sa, hsa_buf); /* vx registers are saved in smp.c */
 	return 0;
 }
 
@@ -289,115 +140,6 @@ static void release_hsa(void)
 	hsa_available = 0;
 }
 
-/*
- * Read routine for zcore character device
- * First 4K are dump header
- * Next 32MB are HSA Memory
- * Rest is read from absolute Memory
- */
-static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
-			  loff_t *ppos)
-{
-	unsigned long mem_start; /* Start address in memory */
-	size_t mem_offs;	 /* Offset in dump memory */
-	size_t hdr_count;	 /* Size of header part of output buffer */
-	size_t size;
-	int rc;
-
-	mutex_lock(&zcore_mutex);
-
-	if (*ppos > (sys_info.mem_size + HEADER_SIZE)) {
-		rc = -EINVAL;
-		goto fail;
-	}
-
-	count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos));
-
-	/* Copy dump header */
-	if (*ppos < HEADER_SIZE) {
-		size = min(count, (size_t) (HEADER_SIZE - *ppos));
-		if (copy_to_user(buf, &zcore_header + *ppos, size)) {
-			rc = -EFAULT;
-			goto fail;
-		}
-		hdr_count = size;
-		mem_start = 0;
-	} else {
-		hdr_count = 0;
-		mem_start = *ppos - HEADER_SIZE;
-	}
-
-	mem_offs = 0;
-
-	/* Copy from HSA data */
-	if (*ppos < sclp.hsa_size + HEADER_SIZE) {
-		size = min((count - hdr_count),
-			   (size_t) (sclp.hsa_size - mem_start));
-		rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
-		if (rc)
-			goto fail;
-
-		mem_offs += size;
-	}
-
-	/* Copy from real mem */
-	size = count - mem_offs - hdr_count;
-	rc = copy_to_user_real(buf + hdr_count + mem_offs,
-			       (void *) mem_start + mem_offs, size);
-	if (rc)
-		goto fail;
-
-	/*
-	 * Since s390 dump analysis tools like lcrash or crash
-	 * expect register sets in the prefix pages of the cpus,
-	 * we copy them into the read buffer, if necessary.
-	 * buf + hdr_count: Start of memory part of output buffer
-	 * mem_start: Start memory address to copy from
-	 * count - hdr_count: Size of memory area to copy
-	 */
-	if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) {
-		rc = -EFAULT;
-		goto fail;
-	}
-	*ppos += count;
-fail:
-	mutex_unlock(&zcore_mutex);
-	return (rc < 0) ? rc : count;
-}
-
-static int zcore_open(struct inode *inode, struct file *filp)
-{
-	if (!hsa_available)
-		return -ENODATA;
-	else
-		return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
-}
-
-static int zcore_release(struct inode *inode, struct file *filep)
-{
-	if (hsa_available)
-		release_hsa();
-	return 0;
-}
-
-static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
-{
-	loff_t rc;
-
-	mutex_lock(&zcore_mutex);
-	rc = no_seek_end_llseek(file, offset, orig);
-	mutex_unlock(&zcore_mutex);
-	return rc;
-}
-
-static const struct file_operations zcore_fops = {
-	.owner		= THIS_MODULE,
-	.llseek		= zcore_lseek,
-	.read		= zcore_read,
-	.open		= zcore_open,
-	.release	= zcore_release,
-};
-
 static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
 				 size_t count, loff_t *ppos)
 {
@@ -501,50 +243,6 @@ static const struct file_operations zcore_hsa_fops = {
 	.llseek		= no_llseek,
 };
 
-static void __init set_lc_mask(struct save_area *map)
-{
-	memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
-	memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
-	memset(&map->psw, 0xff, sizeof(map->psw));
-	memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
-	memset(&map->fp_ctrl_reg, 0xff, sizeof(map->fp_ctrl_reg));
-	memset(&map->tod_reg, 0xff, sizeof(map->tod_reg));
-	memset(&map->timer, 0xff, sizeof(map->timer));
-	memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
-	memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
-	memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
-}
-
-/*
- * Initialize dump globals for a given architecture
- */
-static int __init sys_info_init(enum arch_id arch, unsigned long mem_end)
-{
-	int rc;
-
-	switch (arch) {
-	case ARCH_S390X:
-		pr_alert("DETECTED 'S390X (64 bit) OS'\n");
-		break;
-	case ARCH_S390:
-		pr_alert("DETECTED 'S390 (32 bit) OS'\n");
-		break;
-	default:
-		pr_alert("0x%x is an unknown architecture.\n",arch);
-		return -EINVAL;
-	}
-	sys_info.sa_base = SAVE_AREA_BASE;
-	sys_info.sa_size = sizeof(struct save_area);
-	sys_info.arch = arch;
-	set_lc_mask(&sys_info.lc_mask);
-	rc = init_cpu_info(arch);
-	if (rc)
-		return rc;
-	sys_info.mem_size = mem_end;
-
-	return 0;
-}
-
 static int __init check_sdias(void)
 {
 	if (!sclp.hsa_size) {
@@ -554,43 +252,6 @@ static int __init check_sdias(void)
 	return 0;
 }
 
-static int __init get_mem_info(unsigned long *mem, unsigned long *end)
-{
-	struct memblock_region *reg;
-
-	for_each_memblock(memory, reg) {
-		*mem += reg->size;
-		*end = max_t(unsigned long, *end, reg->base + reg->size);
-	}
-	return 0;
-}
-
-static void __init zcore_header_init(int arch, struct zcore_header *hdr,
-				     unsigned long mem_size)
-{
-	u32 prefix;
-	int i;
-
-	if (arch == ARCH_S390X)
-		hdr->arch_id = DUMP_ARCH_S390X;
-	else
-		hdr->arch_id = DUMP_ARCH_S390;
-	hdr->mem_size = mem_size;
-	hdr->rmem_size = mem_size;
-	hdr->mem_end = sys_info.mem_size;
-	hdr->num_pages = mem_size / PAGE_SIZE;
-	hdr->tod = get_tod_clock();
-	get_cpu_id(&hdr->cpu_id);
-	for (i = 0; i < dump_save_areas.count; i++) {
-		prefix = dump_save_areas.areas[i]->sa.pref_reg;
-		hdr->real_cpu_cnt++;
-		if (!prefix)
-			continue;
-		hdr->lc_vec[hdr->cpu_cnt] = prefix;
-		hdr->cpu_cnt++;
-	}
-}
-
 /*
  * Provide IPL parameter information block from either HSA or memory
  * for future reipl
@@ -623,11 +284,9 @@ static int __init zcore_reipl_init(void)
 
 static int __init zcore_init(void)
 {
-	unsigned long mem_size, mem_end;
 	unsigned char arch;
 	int rc;
 
-	mem_size = mem_end = 0;
 	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
 		return -ENODATA;
 	if (OLDMEM_BASE)
@@ -661,14 +320,10 @@ static int __init zcore_init(void)
 		goto fail;
 	}
 
-	rc = get_mem_info(&mem_size, &mem_end);
-	if (rc)
-		goto fail;
-
-	rc = sys_info_init(arch, mem_end);
+	pr_alert("DETECTED 'S390X (64 bit) OS'\n");
+	rc = init_cpu_info();
 	if (rc)
 		goto fail;
-	zcore_header_init(arch, &zcore_header, mem_size);
 
 	rc = zcore_reipl_init();
 	if (rc)
@@ -679,17 +334,11 @@ static int __init zcore_init(void)
 		rc = -ENOMEM;
 		goto fail;
 	}
-	zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
-					 &zcore_fops);
-	if (!zcore_file) {
-		rc = -ENOMEM;
-		goto fail_dir;
-	}
 	zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir,
 						NULL, &zcore_memmap_fops);
 	if (!zcore_memmap_file) {
 		rc = -ENOMEM;
-		goto fail_file;
+		goto fail_dir;
 	}
 	zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
 						NULL, &zcore_reipl_fops);
@@ -709,8 +358,6 @@ fail_reipl_file:
 	debugfs_remove(zcore_reipl_file);
 fail_memmap_file:
 	debugfs_remove(zcore_memmap_file);
-fail_file:
-	debugfs_remove(zcore_file);
 fail_dir:
 	debugfs_remove(zcore_dir);
 fail:
@@ -726,7 +373,6 @@ static void __exit zcore_exit(void)
 	debugfs_remove(zcore_hsa_file);
 	debugfs_remove(zcore_reipl_file);
 	debugfs_remove(zcore_memmap_file);
-	debugfs_remove(zcore_file);
 	debugfs_remove(zcore_dir);
 	diag308(DIAG308_REL_HSA, NULL);
 }
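
The rewritten HSA copy helpers above replace the separate first/middle/last-block handling with one loop that always reads a full page into the hsa_buf bounce buffer and then copies only the part that overlaps the request. A userspace sketch of that loop shape; it copies out of an ordinary in-memory array instead of calling sclp_sdias_copy() and ignores the real HSA's block-number offset:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static unsigned char hsa[4 * PAGE_SIZE];      /* stand-in for the HSA contents */
static unsigned char page_buf[PAGE_SIZE];     /* bounce buffer, like hsa_buf */

/* pretend "read page-sized block blk of the HSA into buf" */
static void read_hsa_block(void *buf, unsigned long blk)
{
	memcpy(buf, hsa + blk * PAGE_SIZE, PAGE_SIZE);
}

static void copy_hsa(void *dest, unsigned long src, size_t count)
{
	unsigned long offset, bytes;

	while (count) {
		read_hsa_block(page_buf, src / PAGE_SIZE);
		offset = src % PAGE_SIZE;
		bytes = PAGE_SIZE - offset < count ? PAGE_SIZE - offset : count;
		memcpy(dest, page_buf + offset, bytes);
		src += bytes;
		dest = (unsigned char *) dest + bytes;
		count -= bytes;
	}
}

int main(void)
{
	unsigned char out[100];

	memset(hsa, 0xab, sizeof(hsa));
	copy_hsa(out, PAGE_SIZE - 10, sizeof(out));   /* request crosses a page boundary */
	printf("copied %zu bytes, first byte 0x%x\n", sizeof(out), out[0]);
	return 0;
}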

+ 4 - 1
drivers/s390/cio/Makefile

@@ -2,8 +2,11 @@
 # Makefile for the S/390 common i/o drivers
 #
 
+# The following is required for define_trace.h to find ./trace.h
+CFLAGS_trace.o := -I$(src)
+
 obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
-	fcx.o itcw.o crw.o ccwreq.o
+	fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o
 ccw_device-objs += device.o device_fsm.o device_ops.o
 ccw_device-objs += device_id.o device_pgid.o device_status.o
 obj-y += ccw_device.o cmf.o

+ 1 - 0
drivers/s390/cio/airq.c

@@ -89,6 +89,7 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy)
 
 	set_cpu_flag(CIF_NOHZ_DELAY);
 	tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
+	trace_s390_cio_adapter_int(tpi_info);
 	head = &airq_lists[tpi_info->isc];
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(airq, head, list)

+ 2 - 3
drivers/s390/cio/chsc_sch.c

@@ -133,7 +133,7 @@ static int chsc_subchannel_prepare(struct subchannel *sch)
 	 * since we don't have a way to clear the subchannel and
 	 * cannot disable it with a request running.
 	 */
-	cc = stsch_err(sch->schid, &schib);
+	cc = stsch(sch->schid, &schib);
 	if (!cc && scsw_stctl(&schib.scsw))
 		return -EAGAIN;
 	return 0;
@@ -185,8 +185,7 @@ static int __init chsc_init_dbfs(void)
 	debug_set_level(chsc_debug_log_id, 2);
 	return 0;
 out:
-	if (chsc_debug_msg_id)
-		debug_unregister(chsc_debug_msg_id);
+	debug_unregister(chsc_debug_msg_id);
 	return -ENOMEM;
 }
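
This hunk, like the similar ones in cio.c, qdio_debug.c and zcrypt_api.c further down, drops the NULL checks because debug_unregister() tolerates a NULL handle, so error paths can unwind unconditionally. A plain-C sketch of that NULL-tolerant destructor shape (free() already behaves this way); the demo names below are not the s390 debug API:

#include <stdlib.h>

struct demo_debug_info {
	char *buf;
};

static struct demo_debug_info *demo_register(size_t size)
{
	struct demo_debug_info *di = calloc(1, sizeof(*di));

	if (!di)
		return NULL;
	di->buf = malloc(size);
	if (!di->buf) {
		free(di);
		return NULL;
	}
	return di;
}

/* safe to call with NULL, so cleanup paths need no guard */
static void demo_unregister(struct demo_debug_info *di)
{
	if (!di)
		return;
	free(di->buf);
	free(di);
}

int main(void)
{
	struct demo_debug_info *a = demo_register(64);
	struct demo_debug_info *b = NULL;   /* e.g. a second registration that failed */

	demo_unregister(b);                 /* no NULL check needed at the call site */
	demo_unregister(a);
	return 0;
}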
 

+ 18 - 19
drivers/s390/cio/cio.c

@@ -41,6 +41,7 @@
 #include "blacklist.h"
 #include "cio_debug.h"
 #include "chp.h"
+#include "trace.h"
 
 debug_info_t *cio_debug_msg_id;
 debug_info_t *cio_debug_trace_id;
@@ -76,12 +77,9 @@ static int __init cio_debug_init(void)
 	return 0;
 
 out_unregister:
-	if (cio_debug_msg_id)
-		debug_unregister(cio_debug_msg_id);
-	if (cio_debug_trace_id)
-		debug_unregister(cio_debug_trace_id);
-	if (cio_debug_crw_id)
-		debug_unregister(cio_debug_crw_id);
+	debug_unregister(cio_debug_msg_id);
+	debug_unregister(cio_debug_trace_id);
+	debug_unregister(cio_debug_crw_id);
 	return -1;
 }
 
@@ -348,18 +346,18 @@ int cio_commit_config(struct subchannel *sch)
 	struct schib schib;
 	struct irb irb;
 
-	if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
+	if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
 		return -ENODEV;
 
 	for (retry = 0; retry < 5; retry++) {
 		/* copy desired changes to local schib */
 		cio_apply_config(sch, &schib);
-		ccode = msch_err(sch->schid, &schib);
+		ccode = msch(sch->schid, &schib);
 		if (ccode < 0) /* -EIO if msch gets a program check. */
 			return ccode;
 		switch (ccode) {
 		case 0: /* successful */
-			if (stsch_err(sch->schid, &schib) ||
+			if (stsch(sch->schid, &schib) ||
 			    !css_sch_is_valid(&schib))
 				return -ENODEV;
 			if (cio_check_config(sch, &schib)) {
@@ -394,7 +392,7 @@ int cio_update_schib(struct subchannel *sch)
 {
 	struct schib schib;
 
-	if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
+	if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
 		return -ENODEV;
 
 	memcpy(&sch->schib, &schib, sizeof(schib));
@@ -503,7 +501,7 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
 	 * If stsch gets an exception, it means the current subchannel set
 	 * is not valid.
 	 */
-	ccode = stsch_err(schid, &sch->schib);
+	ccode = stsch(schid, &sch->schib);
 	if (ccode) {
 		err = (ccode == 3) ? -ENXIO : ccode;
 		goto out;
@@ -542,6 +540,7 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy)
 
 	set_cpu_flag(CIF_NOHZ_DELAY);
 	tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
+	trace_s390_cio_interrupt(tpi_info);
 	irb = this_cpu_ptr(&cio_irb);
 	sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
 	if (!sch) {
@@ -619,7 +618,7 @@ static int cio_test_for_console(struct subchannel_id schid, void *data)
 {
 	struct schib schib;
 
-	if (stsch_err(schid, &schib) != 0)
+	if (stsch(schid, &schib) != 0)
 		return -ENXIO;
 	if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
 	    (schib.pmcw.dev == console_devno)) {
@@ -638,7 +637,7 @@ static int cio_get_console_sch_no(void)
 	if (console_irq != -1) {
 		/* VM provided us with the irq number of the console. */
 		schid.sch_no = console_irq;
-		if (stsch_err(schid, &schib) != 0 ||
+		if (stsch(schid, &schib) != 0 ||
 		    (schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv)
 			return -1;
 		console_devno = schib.pmcw.dev;
@@ -708,10 +707,10 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
 	cc = 0;
 	for (retry=0;retry<3;retry++) {
 		schib->pmcw.ena = 0;
-		cc = msch_err(schid, schib);
+		cc = msch(schid, schib);
 		if (cc)
 			return (cc==3?-ENODEV:-EBUSY);
-		if (stsch_err(schid, schib) || !css_sch_is_valid(schib))
+		if (stsch(schid, schib) || !css_sch_is_valid(schib))
 			return -ENODEV;
 		if (!schib->pmcw.ena)
 			return 0;
@@ -758,7 +757,7 @@ static int stsch_reset(struct subchannel_id schid, struct schib *addr)
 
 	pgm_check_occured = 0;
 	s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
-	rc = stsch_err(schid, addr);
+	rc = stsch(schid, addr);
 	s390_base_pgm_handler_fn = NULL;
 
 	/* The program check handler could have changed pgm_check_occured. */
@@ -795,7 +794,7 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
 			/* No default clear strategy */
 			break;
 		}
-		stsch_err(schid, &schib);
+		stsch(schid, &schib);
 		__disable_subchannel_easy(schid, &schib);
 	}
 out:
@@ -917,7 +916,7 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
 {
 	struct subchannel_id uninitialized_var(schid);
 
-	s390_reset_system(NULL, NULL, NULL);
+	s390_reset_system();
 	if (reipl_find_schid(devid, &schid) != 0)
 		panic("IPL Device not found\n");
 	do_reipl_asm(*((__u32*)&schid));
@@ -943,7 +942,7 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
 		if (__chsc_enable_facility(&sda_area, CHSC_SDA_OC_MSS))
 			return -ENODEV;
 	}
-	if (stsch_err(schid, &schib))
+	if (stsch(schid, &schib))
 		return -ENODEV;
 	if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
 		return -ENODEV;

+ 12 - 0
drivers/s390/cio/cio.h

@@ -45,6 +45,18 @@ struct pmcw {
 				/*  ... in an operand exception.       */
 } __attribute__ ((packed));
 
+/* I/O-Interruption Code as stored by TEST PENDING INTERRUPTION (TPI). */
+struct tpi_info {
+	struct subchannel_id schid;
+	u32 intparm;
+	u32 adapter_IO:1;
+	u32 :1;
+	u32 isc:3;
+	u32 :27;
+	u32 type:3;
+	u32 :12;
+} __packed __aligned(4);
+
 /* Target SCHIB configuration. */
 struct schib_config {
 	u64 mba;

+ 1 - 0
drivers/s390/cio/crw.c

@@ -14,6 +14,7 @@
 #include <linux/wait.h>
 #include <asm/crw.h>
 #include <asm/ctl_reg.h>
+#include "ioasm.h"
 
 static DEFINE_MUTEX(crw_handler_mutex);
 static crw_handler_t crw_handlers[NR_RSCS];

+ 1 - 1
drivers/s390/cio/css.c

@@ -390,7 +390,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
 		/* Will be done on the slow path. */
 		return -EAGAIN;
 	}
-	if (stsch_err(schid, &schib)) {
+	if (stsch(schid, &schib)) {
 		/* Subchannel is not provided. */
 		return -ENXIO;
 	}

+ 1 - 1
drivers/s390/cio/device_fsm.c

@@ -44,7 +44,7 @@ static void ccw_timeout_log(struct ccw_device *cdev)
 	sch = to_subchannel(cdev->dev.parent);
 	private = to_io_private(sch);
 	orb = &private->orb;
-	cc = stsch_err(sch->schid, &schib);
+	cc = stsch(sch->schid, &schib);
 
 	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
 	       "device information:\n", get_tod_clock());

+ 0 - 45
drivers/s390/cio/io_sch.h

@@ -169,49 +169,4 @@ struct ccw_device_private {
 	enum interruption_class int_class;
 };
 
-static inline int rsch(struct subchannel_id schid)
-{
-	register struct subchannel_id reg1 asm("1") = schid;
-	int ccode;
-
-	asm volatile(
-		"	rsch\n"
-		"	ipm	%0\n"
-		"	srl	%0,28"
-		: "=d" (ccode)
-		: "d" (reg1)
-		: "cc", "memory");
-	return ccode;
-}
-
-static inline int hsch(struct subchannel_id schid)
-{
-	register struct subchannel_id reg1 asm("1") = schid;
-	int ccode;
-
-	asm volatile(
-		"	hsch\n"
-		"	ipm	%0\n"
-		"	srl	%0,28"
-		: "=d" (ccode)
-		: "d" (reg1)
-		: "cc");
-	return ccode;
-}
-
-static inline int xsch(struct subchannel_id schid)
-{
-	register struct subchannel_id reg1 asm("1") = schid;
-	int ccode;
-
-	asm volatile(
-		"	.insn	rre,0xb2760000,%1,0\n"
-		"	ipm	%0\n"
-		"	srl	%0,28"
-		: "=d" (ccode)
-		: "d" (reg1)
-		: "cc");
-	return ccode;
-}
-
 #endif

+ 224 - 0
drivers/s390/cio/ioasm.c

@@ -0,0 +1,224 @@
+/*
+ * Channel subsystem I/O instructions.
+ */
+
+#include <linux/export.h>
+
+#include <asm/chpid.h>
+#include <asm/schid.h>
+#include <asm/crw.h>
+
+#include "ioasm.h"
+#include "orb.h"
+#include "cio.h"
+
+int stsch(struct subchannel_id schid, struct schib *addr)
+{
+	register struct subchannel_id reg1 asm ("1") = schid;
+	int ccode = -EIO;
+
+	asm volatile(
+		"	stsch	0(%3)\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (ccode), "=m" (*addr)
+		: "d" (reg1), "a" (addr)
+		: "cc");
+	trace_s390_cio_stsch(schid, addr, ccode);
+
+	return ccode;
+}
+EXPORT_SYMBOL(stsch);
+
+int msch(struct subchannel_id schid, struct schib *addr)
+{
+	register struct subchannel_id reg1 asm ("1") = schid;
+	int ccode = -EIO;
+
+	asm volatile(
+		"	msch	0(%2)\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (ccode)
+		: "d" (reg1), "a" (addr), "m" (*addr)
+		: "cc");
+	trace_s390_cio_msch(schid, addr, ccode);
+
+	return ccode;
+}
+
+int tsch(struct subchannel_id schid, struct irb *addr)
+{
+	register struct subchannel_id reg1 asm ("1") = schid;
+	int ccode;
+
+	asm volatile(
+		"	tsch	0(%3)\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode), "=m" (*addr)
+		: "d" (reg1), "a" (addr)
+		: "cc");
+	trace_s390_cio_tsch(schid, addr, ccode);
+
+	return ccode;
+}
+
+int ssch(struct subchannel_id schid, union orb *addr)
+{
+	register struct subchannel_id reg1 asm("1") = schid;
+	int ccode = -EIO;
+
+	asm volatile(
+		"	ssch	0(%2)\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (ccode)
+		: "d" (reg1), "a" (addr), "m" (*addr)
+		: "cc", "memory");
+	trace_s390_cio_ssch(schid, addr, ccode);
+
+	return ccode;
+}
+EXPORT_SYMBOL(ssch);
+
+int csch(struct subchannel_id schid)
+{
+	register struct subchannel_id reg1 asm("1") = schid;
+	int ccode;
+
+	asm volatile(
+		"	csch\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode)
+		: "d" (reg1)
+		: "cc");
+	trace_s390_cio_csch(schid, ccode);
+
+	return ccode;
+}
+EXPORT_SYMBOL(csch);
+
+int tpi(struct tpi_info *addr)
+{
+	int ccode;
+
+	asm volatile(
+		"	tpi	0(%2)\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode), "=m" (*addr)
+		: "a" (addr)
+		: "cc");
+	trace_s390_cio_tpi(addr, ccode);
+
+	return ccode;
+}
+
+int chsc(void *chsc_area)
+{
+	typedef struct { char _[4096]; } addr_type;
+	int cc;
+
+	asm volatile(
+		"	.insn	rre,0xb25f0000,%2,0\n"
+		"	ipm	%0\n"
+		"	srl	%0,28\n"
+		: "=d" (cc), "=m" (*(addr_type *) chsc_area)
+		: "d" (chsc_area), "m" (*(addr_type *) chsc_area)
+		: "cc");
+	trace_s390_cio_chsc(chsc_area, cc);
+
+	return cc;
+}
+EXPORT_SYMBOL(chsc);
+
+int rchp(struct chp_id chpid)
+{
+	register struct chp_id reg1 asm ("1") = chpid;
+	int ccode;
+
+	asm volatile(
+		"	lr	1,%1\n"
+		"	rchp\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode) : "d" (reg1) : "cc");
+	trace_s390_cio_rchp(chpid, ccode);
+
+	return ccode;
+}
+
+int rsch(struct subchannel_id schid)
+{
+	register struct subchannel_id reg1 asm("1") = schid;
+	int ccode;
+
+	asm volatile(
+		"	rsch\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode)
+		: "d" (reg1)
+		: "cc", "memory");
+	trace_s390_cio_rsch(schid, ccode);
+
+	return ccode;
+}
+
+int hsch(struct subchannel_id schid)
+{
+	register struct subchannel_id reg1 asm("1") = schid;
+	int ccode;
+
+	asm volatile(
+		"	hsch\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode)
+		: "d" (reg1)
+		: "cc");
+	trace_s390_cio_hsch(schid, ccode);
+
+	return ccode;
+}
+
+int xsch(struct subchannel_id schid)
+{
+	register struct subchannel_id reg1 asm("1") = schid;
+	int ccode;
+
+	asm volatile(
+		"	xsch\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode)
+		: "d" (reg1)
+		: "cc");
+	trace_s390_cio_xsch(schid, ccode);
+
+	return ccode;
+}
+
+int stcrw(struct crw *crw)
+{
+	int ccode;
+
+	asm volatile(
+		"	stcrw	0(%2)\n"
+		"	ipm	%0\n"
+		"	srl	%0,28\n"
+		: "=d" (ccode), "=m" (*crw)
+		: "a" (crw)
+		: "cc");
+	trace_s390_cio_stcrw(crw, ccode);
+
+	return ccode;
+}

+ 15 - 154
drivers/s390/cio/ioasm.h

@@ -3,165 +3,26 @@
 
 #include <asm/chpid.h>
 #include <asm/schid.h>
+#include <asm/crw.h>
 #include "orb.h"
 #include "cio.h"
+#include "trace.h"
 
 /*
- * TPI info structure
+ * Some S390 specific IO instructions
  */
-struct tpi_info {
-	struct subchannel_id schid;
-	__u32 intparm;		 /* interruption parameter */
-	__u32 adapter_IO : 1;
-	__u32 reserved2	 : 1;
-	__u32 isc	 : 3;
-	__u32 reserved3	 : 12;
-	__u32 int_type	 : 3;
-	__u32 reserved4	 : 12;
-} __attribute__ ((packed));
 
-
-/*
- * Some S390 specific IO instructions as inline
- */
-
-static inline int stsch_err(struct subchannel_id schid, struct schib *addr)
-{
-	register struct subchannel_id reg1 asm ("1") = schid;
-	int ccode = -EIO;
-
-	asm volatile(
-		"	stsch	0(%3)\n"
-		"0:	ipm	%0\n"
-		"	srl	%0,28\n"
-		"1:\n"
-		EX_TABLE(0b,1b)
-		: "+d" (ccode), "=m" (*addr)
-		: "d" (reg1), "a" (addr)
-		: "cc");
-	return ccode;
-}
-
-static inline int msch(struct subchannel_id schid, struct schib *addr)
-{
-	register struct subchannel_id reg1 asm ("1") = schid;
-	int ccode;
-
-	asm volatile(
-		"	msch	0(%2)\n"
-		"	ipm	%0\n"
-		"	srl	%0,28"
-		: "=d" (ccode)
-		: "d" (reg1), "a" (addr), "m" (*addr)
-		: "cc");
-	return ccode;
-}
-
-static inline int msch_err(struct subchannel_id schid, struct schib *addr)
-{
-	register struct subchannel_id reg1 asm ("1") = schid;
-	int ccode = -EIO;
-
-	asm volatile(
-		"	msch	0(%2)\n"
-		"0:	ipm	%0\n"
-		"	srl	%0,28\n"
-		"1:\n"
-		EX_TABLE(0b,1b)
-		: "+d" (ccode)
-		: "d" (reg1), "a" (addr), "m" (*addr)
-		: "cc");
-	return ccode;
-}
-
-static inline int tsch(struct subchannel_id schid, struct irb *addr)
-{
-	register struct subchannel_id reg1 asm ("1") = schid;
-	int ccode;
-
-	asm volatile(
-		"	tsch	0(%3)\n"
-		"	ipm	%0\n"
-		"	srl	%0,28"
-		: "=d" (ccode), "=m" (*addr)
-		: "d" (reg1), "a" (addr)
-		: "cc");
-	return ccode;
-}
-
-static inline int ssch(struct subchannel_id schid, union orb *addr)
-{
-	register struct subchannel_id reg1 asm("1") = schid;
-	int ccode = -EIO;
-
-	asm volatile(
-		"	ssch	0(%2)\n"
-		"0:	ipm	%0\n"
-		"	srl	%0,28\n"
-		"1:\n"
-		EX_TABLE(0b, 1b)
-		: "+d" (ccode)
-		: "d" (reg1), "a" (addr), "m" (*addr)
-		: "cc", "memory");
-	return ccode;
-}
-
-static inline int csch(struct subchannel_id schid)
-{
-	register struct subchannel_id reg1 asm("1") = schid;
-	int ccode;
-
-	asm volatile(
-		"	csch\n"
-		"	ipm	%0\n"
-		"	srl	%0,28"
-		: "=d" (ccode)
-		: "d" (reg1)
-		: "cc");
-	return ccode;
-}
-
-static inline int tpi(struct tpi_info *addr)
-{
-	int ccode;
-
-	asm volatile(
-		"	tpi	0(%2)\n"
-		"	ipm	%0\n"
-		"	srl	%0,28"
-		: "=d" (ccode), "=m" (*addr)
-		: "a" (addr)
-		: "cc");
-	return ccode;
-}
-
-static inline int chsc(void *chsc_area)
-{
-	typedef struct { char _[4096]; } addr_type;
-	int cc;
-
-	asm volatile(
-		"	.insn	rre,0xb25f0000,%2,0\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (cc), "=m" (*(addr_type *) chsc_area)
-		: "d" (chsc_area), "m" (*(addr_type *) chsc_area)
-		: "cc");
-	return cc;
-}
-
-static inline int rchp(struct chp_id chpid)
-{
-	register struct chp_id reg1 asm ("1") = chpid;
-	int ccode;
-
-	asm volatile(
-		"	lr	1,%1\n"
-		"	rchp\n"
-		"	ipm	%0\n"
-		"	srl	%0,28"
-		: "=d" (ccode) : "d" (reg1) : "cc");
-	return ccode;
-}
+int stsch(struct subchannel_id schid, struct schib *addr);
+int msch(struct subchannel_id schid, struct schib *addr);
+int tsch(struct subchannel_id schid, struct irb *addr);
+int ssch(struct subchannel_id schid, union orb *addr);
+int csch(struct subchannel_id schid);
+int tpi(struct tpi_info *addr);
+int chsc(void *chsc_area);
+int rchp(struct chp_id chpid);
+int rsch(struct subchannel_id schid);
+int hsch(struct subchannel_id schid);
+int xsch(struct subchannel_id schid);
+int stcrw(struct crw *crw);
 
 #endif

+ 2 - 4
drivers/s390/cio/qdio_debug.c

@@ -366,8 +366,6 @@ void qdio_debug_exit(void)
 {
 	qdio_clear_dbf_list();
 	debugfs_remove(debugfs_root);
-	if (qdio_dbf_setup)
-		debug_unregister(qdio_dbf_setup);
-	if (qdio_dbf_error)
-		debug_unregister(qdio_dbf_error);
+	debug_unregister(qdio_dbf_setup);
+	debug_unregister(qdio_dbf_error);
 }

+ 24 - 0
drivers/s390/cio/trace.c

@@ -0,0 +1,24 @@
+/*
+ * Tracepoint definitions for s390_cio
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+ */
+
+#include <asm/crw.h>
+#include "cio.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_stsch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_msch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_tsch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_tpi);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_ssch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_csch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_hsch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_xsch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_rsch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_rchp);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_chsc);

+ 363 - 0
drivers/s390/cio/trace.h

@@ -0,0 +1,363 @@
+/*
+ * Tracepoint header for the s390 Common I/O layer (CIO)
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <asm/crw.h>
+#include <uapi/asm/chpid.h>
+#include <uapi/asm/schid.h>
+#include "cio.h"
+#include "orb.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM s390
+
+#if !defined(_TRACE_S390_CIO_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_S390_CIO_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(s390_class_schib,
+	TP_PROTO(struct subchannel_id schid, struct schib *schib, int cc),
+	TP_ARGS(schid, schib, cc),
+	TP_STRUCT__entry(
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, schno)
+		__field(u16, devno)
+		__field_struct(struct schib, schib)
+		__field(int, cc)
+	),
+	TP_fast_assign(
+		__entry->cssid = schid.cssid;
+		__entry->ssid = schid.ssid;
+		__entry->schno = schid.sch_no;
+		__entry->devno = schib->pmcw.dev;
+		__entry->schib = *schib;
+		__entry->cc = cc;
+	),
+	TP_printk("schid=%x.%x.%04x cc=%d ena=%d st=%d dnv=%d dev=%04x "
+		  "lpm=0x%02x pnom=0x%02x lpum=0x%02x pim=0x%02x pam=0x%02x "
+		  "pom=0x%02x chpids=%016llx",
+		  __entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
+		  __entry->schib.pmcw.ena, __entry->schib.pmcw.st,
+		  __entry->schib.pmcw.dnv, __entry->schib.pmcw.dev,
+		  __entry->schib.pmcw.lpm, __entry->schib.pmcw.pnom,
+		  __entry->schib.pmcw.lpum, __entry->schib.pmcw.pim,
+		  __entry->schib.pmcw.pam, __entry->schib.pmcw.pom,
+		  *((u64 *) __entry->schib.pmcw.chpid)
+	)
+);
+
+/**
+ * s390_cio_stsch -  Store Subchannel instruction (STSCH) was performed
+ * @schid: Subchannel ID
+ * @schib: Subchannel-Information block
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schib, s390_cio_stsch,
+	TP_PROTO(struct subchannel_id schid, struct schib *schib, int cc),
+	TP_ARGS(schid, schib, cc)
+);
+
+/**
+ * s390_cio_msch -  Modify Subchannel instruction (MSCH) was performed
+ * @schid: Subchannel ID
+ * @schib: Subchannel-Information block
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schib, s390_cio_msch,
+	TP_PROTO(struct subchannel_id schid, struct schib *schib, int cc),
+	TP_ARGS(schid, schib, cc)
+);
+
+/**
+ * s390_cio_tsch - Test Subchannel instruction (TSCH) was performed
+ * @schid: Subchannel ID
+ * @irb: Interruption-Response Block
+ * @cc: Condition code
+ */
+TRACE_EVENT(s390_cio_tsch,
+	TP_PROTO(struct subchannel_id schid, struct irb *irb, int cc),
+	TP_ARGS(schid, irb, cc),
+	TP_STRUCT__entry(
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, schno)
+		__field_struct(struct irb, irb)
+		__field(int, cc)
+	),
+	TP_fast_assign(
+		__entry->cssid = schid.cssid;
+		__entry->ssid = schid.ssid;
+		__entry->schno = schid.sch_no;
+		__entry->irb = *irb;
+		__entry->cc = cc;
+	),
+	TP_printk("schid=%x.%x.%04x cc=%d dcc=%d pno=%d fctl=0x%x actl=0x%x "
+		  "stctl=0x%x dstat=0x%x cstat=0x%x",
+		  __entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
+		  scsw_cc(&__entry->irb.scsw), scsw_pno(&__entry->irb.scsw),
+		  scsw_fctl(&__entry->irb.scsw), scsw_actl(&__entry->irb.scsw),
+		  scsw_stctl(&__entry->irb.scsw),
+		  scsw_dstat(&__entry->irb.scsw), scsw_cstat(&__entry->irb.scsw)
+	)
+);
+
+/**
+ * s390_cio_tpi - Test Pending Interruption instruction (TPI) was performed
+ * @addr: Address of the I/O interruption code or %NULL
+ * @cc: Condition code
+ */
+TRACE_EVENT(s390_cio_tpi,
+	TP_PROTO(struct tpi_info *addr, int cc),
+	TP_ARGS(addr, cc),
+	TP_STRUCT__entry(
+		__field(int, cc)
+		__field_struct(struct tpi_info, tpi_info)
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, schno)
+	),
+	TP_fast_assign(
+		__entry->cc = cc;
+		if (cc != 0)
+			memset(&__entry->tpi_info, 0, sizeof(struct tpi_info));
+		else if (addr)
+			__entry->tpi_info = *addr;
+		else {
+			memcpy(&__entry->tpi_info, &S390_lowcore.subchannel_id,
+			       sizeof(struct tpi_info));
+		}
+		__entry->cssid = __entry->tpi_info.schid.cssid;
+		__entry->ssid = __entry->tpi_info.schid.ssid;
+		__entry->schno = __entry->tpi_info.schid.sch_no;
+	),
+	TP_printk("schid=%x.%x.%04x cc=%d a=%d isc=%d type=%d",
+		  __entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
+		  __entry->tpi_info.adapter_IO, __entry->tpi_info.isc,
+		  __entry->tpi_info.type
+	)
+);
+
+/**
+ * s390_cio_ssch - Start Subchannel instruction (SSCH) was performed
+ * @schid: Subchannel ID
+ * @orb: Operation-Request Block
+ * @cc: Condition code
+ */
+TRACE_EVENT(s390_cio_ssch,
+	TP_PROTO(struct subchannel_id schid, union orb *orb, int cc),
+	TP_ARGS(schid, orb, cc),
+	TP_STRUCT__entry(
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, schno)
+		__field_struct(union orb, orb)
+		__field(int, cc)
+	),
+	TP_fast_assign(
+		__entry->cssid = schid.cssid;
+		__entry->ssid = schid.ssid;
+		__entry->schno = schid.sch_no;
+		__entry->orb = *orb;
+		__entry->cc = cc;
+	),
+	TP_printk("schid=%x.%x.%04x cc=%d", __entry->cssid, __entry->ssid,
+		  __entry->schno, __entry->cc
+	)
+);
+
+DECLARE_EVENT_CLASS(s390_class_schid,
+	TP_PROTO(struct subchannel_id schid, int cc),
+	TP_ARGS(schid, cc),
+	TP_STRUCT__entry(
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, schno)
+		__field(int, cc)
+	),
+	TP_fast_assign(
+		__entry->cssid = schid.cssid;
+		__entry->ssid = schid.ssid;
+		__entry->schno = schid.sch_no;
+		__entry->cc = cc;
+	),
+	TP_printk("schid=%x.%x.%04x cc=%d", __entry->cssid, __entry->ssid,
+		  __entry->schno, __entry->cc
+	)
+);
+
+/**
+ * s390_cio_csch - Clear Subchannel instruction (CSCH) was performed
+ * @schid: Subchannel ID
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schid, s390_cio_csch,
+	TP_PROTO(struct subchannel_id schid, int cc),
+	TP_ARGS(schid, cc)
+);
+
+/**
+ * s390_cio_hsch - Halt Subchannel instruction (HSCH) was performed
+ * @schid: Subchannel ID
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schid, s390_cio_hsch,
+	TP_PROTO(struct subchannel_id schid, int cc),
+	TP_ARGS(schid, cc)
+);
+
+/**
+ * s390_cio_xsch - Cancel Subchannel instruction (XSCH) was performed
+ * @schid: Subchannel ID
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schid, s390_cio_xsch,
+	TP_PROTO(struct subchannel_id schid, int cc),
+	TP_ARGS(schid, cc)
+);
+
+/**
+ * s390_cio_rsch - Resume Subchannel instruction (RSCH) was performed
+ * @schid: Subchannel ID
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schid, s390_cio_rsch,
+	TP_PROTO(struct subchannel_id schid, int cc),
+	TP_ARGS(schid, cc)
+);
+
+/**
+ * s390_cio_rchp - Reset Channel Path (RCHP) instruction was performed
+ * @chpid: Channel-Path Identifier
+ * @cc: Condition code
+ */
+TRACE_EVENT(s390_cio_rchp,
+	TP_PROTO(struct chp_id chpid, int cc),
+	TP_ARGS(chpid, cc),
+	TP_STRUCT__entry(
+		__field(u8, cssid)
+		__field(u8, id)
+		__field(int, cc)
+	),
+	TP_fast_assign(
+		__entry->cssid = chpid.cssid;
+		__entry->id = chpid.id;
+		__entry->cc = cc;
+	),
+	TP_printk("chpid=%x.%02x cc=%d", __entry->cssid, __entry->id,
+		  __entry->cc
+	)
+);
+
+#define CHSC_MAX_REQUEST_LEN		64
+#define CHSC_MAX_RESPONSE_LEN		64
+
+/**
+ * s390_cio_chsc - Channel Subsystem Call (CHSC) instruction was performed
+ * @chsc: CHSC block
+ * @cc: Condition code
+ */
+TRACE_EVENT(s390_cio_chsc,
+	TP_PROTO(struct chsc_header *chsc, int cc),
+	TP_ARGS(chsc, cc),
+	TP_STRUCT__entry(
+		__field(int, cc)
+		__field(u16, code)
+		__field(u16, rcode)
+		__array(u8, request, CHSC_MAX_REQUEST_LEN)
+		__array(u8, response, CHSC_MAX_RESPONSE_LEN)
+	),
+	TP_fast_assign(
+		__entry->cc = cc;
+		__entry->code = chsc->code;
+		memcpy(&entry->request, chsc,
+		       min_t(u16, chsc->length, CHSC_MAX_REQUEST_LEN));
+		chsc = (struct chsc_header *) ((char *) chsc + chsc->length);
+		__entry->rcode = chsc->code;
+		memcpy(&entry->response, chsc,
+		       min_t(u16, chsc->length, CHSC_MAX_RESPONSE_LEN));
+	),
+	TP_printk("code=0x%04x cc=%d rcode=0x%04x", __entry->code,
+		  __entry->cc, __entry->rcode)
+);
+
+/**
+ * s390_cio_interrupt - An I/O interrupt occurred
+ * @tpi_info: Address of the I/O interruption code
+ */
+TRACE_EVENT(s390_cio_interrupt,
+	TP_PROTO(struct tpi_info *tpi_info),
+	TP_ARGS(tpi_info),
+	TP_STRUCT__entry(
+		__field_struct(struct tpi_info, tpi_info)
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, schno)
+	),
+	TP_fast_assign(
+		__entry->tpi_info = *tpi_info;
+		__entry->cssid = __entry->tpi_info.schid.cssid;
+		__entry->ssid = __entry->tpi_info.schid.ssid;
+		__entry->schno = __entry->tpi_info.schid.sch_no;
+	),
+	TP_printk("schid=%x.%x.%04x isc=%d type=%d",
+		  __entry->cssid, __entry->ssid, __entry->schno,
+		  __entry->tpi_info.isc, __entry->tpi_info.type
+	)
+);
+
+/**
+ * s390_cio_adapter_int - An adapter interrupt occurred
+ * @tpi_info: Address of the I/O interruption code
+ */
+TRACE_EVENT(s390_cio_adapter_int,
+	TP_PROTO(struct tpi_info *tpi_info),
+	TP_ARGS(tpi_info),
+	TP_STRUCT__entry(
+		__field_struct(struct tpi_info, tpi_info)
+	),
+	TP_fast_assign(
+		__entry->tpi_info = *tpi_info;
+	),
+	TP_printk("isc=%d", __entry->tpi_info.isc)
+);
+
+/**
+ * s390_cio_stcrw - Store Channel Report Word (STCRW) was performed
+ * @crw: Channel Report Word
+ * @cc: Condition code
+ */
+TRACE_EVENT(s390_cio_stcrw,
+	TP_PROTO(struct crw *crw, int cc),
+	TP_ARGS(crw, cc),
+	TP_STRUCT__entry(
+		__field_struct(struct crw, crw)
+		__field(int, cc)
+	),
+	TP_fast_assign(
+		__entry->crw = *crw;
+		__entry->cc = cc;
+	),
+	TP_printk("cc=%d slct=%d oflw=%d chn=%d rsc=%d anc=%d erc=0x%x "
+		  "rsid=0x%x",
+		  __entry->cc, __entry->crw.slct, __entry->crw.oflw,
+		  __entry->crw.chn, __entry->crw.rsc,  __entry->crw.anc,
+		  __entry->crw.erc, __entry->crw.rsid
+	)
+);
+
+#endif /* _TRACE_S390_CIO_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>

+ 2 - 4
drivers/s390/crypto/zcrypt_api.c

@@ -1428,10 +1428,8 @@ int __init zcrypt_debug_init(void)
 void zcrypt_debug_exit(void)
 {
 	debugfs_remove(debugfs_root);
-	if (zcrypt_dbf_common)
-		debug_unregister(zcrypt_dbf_common);
-	if (zcrypt_dbf_devices)
-		debug_unregister(zcrypt_dbf_devices);
+	debug_unregister(zcrypt_dbf_common);
+	debug_unregister(zcrypt_dbf_devices);
 }
 
 /**

+ 2 - 1
scripts/Makefile.lib

@@ -104,8 +104,9 @@ modname_flags  = $(if $(filter 1,$(words $(modname))),\
 orig_c_flags   = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(KBUILD_SUBDIR_CCFLAGS) \
                  $(ccflags-y) $(CFLAGS_$(basetarget).o)
 _c_flags       = $(filter-out $(CFLAGS_REMOVE_$(basetarget).o), $(orig_c_flags))
-_a_flags       = $(KBUILD_CPPFLAGS) $(KBUILD_AFLAGS) $(KBUILD_SUBDIR_ASFLAGS) \
+orig_a_flags   = $(KBUILD_CPPFLAGS) $(KBUILD_AFLAGS) $(KBUILD_SUBDIR_ASFLAGS) \
                  $(asflags-y) $(AFLAGS_$(basetarget).o)
+_a_flags       = $(filter-out $(AFLAGS_REMOVE_$(basetarget).o), $(orig_a_flags))
 _cpp_flags     = $(KBUILD_CPPFLAGS) $(cppflags-y) $(CPPFLAGS_$(@F))
 
 #