
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Small overlapping change conflict ('net' changed a line,
'net-next' added a line right afterwards) in flexcan.c

Signed-off-by: David S. Miller <davem@davemloft.net>
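
The overlap described above looks roughly like the sketch below when git stops on
the conflict; the line contents are illustrative placeholders, not the actual
flexcan.c code ('net' is being merged into 'net-next', so HEAD is net-next):

    <<<<<<< HEAD
    	err = do_something(priv);           /* line left untouched by net-next */
    	extra_step_added_by_net_next(priv); /* new line net-next added right after it */
    =======
    	err = do_something_fixed(priv);     /* the same line, as changed by net */
    >>>>>>> net

The resolution keeps the 'net' version of the changed line, followed by the line
that 'net-next' added immediately after it.
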
David S. Miller, 7 years ago
commit 7cda4cee13
100 files changed, 712 insertions(+), 426 deletions(-)
  1. 4 0
      Documentation/devicetree/bindings/hwmon/jc42.txt
  2. 0 7
      Documentation/sysctl/vm.txt
  3. 16 7
      MAINTAINERS
  4. 1 1
      Makefile
  5. 0 1
      arch/arm/include/asm/pgtable-3level.h
  6. 2 2
      arch/arm/kernel/entry-header.S
  7. 0 3
      arch/arm64/Makefile
  8. 1 1
      arch/arm64/include/asm/cacheflush.h
  9. 45 1
      arch/arm64/include/asm/module.h
  10. 0 1
      arch/arm64/include/asm/pgtable.h
  11. 0 3
      arch/arm64/kernel/Makefile
  12. 3 3
      arch/arm64/kernel/cpu_ops.c
  13. 3 3
      arch/arm64/kernel/fpsimd.c
  14. 0 18
      arch/arm64/kernel/ftrace-mod.S
  15. 8 6
      arch/arm64/kernel/ftrace.c
  16. 14 36
      arch/arm64/kernel/module-plts.c
  17. 1 0
      arch/arm64/kernel/module.lds
  18. 0 6
      arch/arm64/kernel/perf_event.c
  19. 16 12
      arch/arm64/mm/context.c
  20. 1 1
      arch/arm64/mm/pgd.c
  21. 1 1
      arch/mips/include/asm/pgtable.h
  22. 2 5
      arch/mips/kvm/mips.c
  23. 0 1
      arch/powerpc/include/asm/book3s/64/pgtable.h
  24. 1 0
      arch/powerpc/include/asm/kvm_ppc.h
  25. 2 0
      arch/powerpc/kernel/misc_64.S
  26. 9 3
      arch/powerpc/kernel/process.c
  27. 23 14
      arch/powerpc/kvm/book3s_64_mmu_hv.c
  28. 1 2
      arch/powerpc/kvm/book3s_hv.c
  29. 2 5
      arch/powerpc/kvm/powerpc.c
  30. 12 3
      arch/powerpc/mm/hash_native_64.c
  31. 1 0
      arch/riscv/include/asm/Kbuild
  32. 6 6
      arch/riscv/include/asm/asm.h
  33. 54 49
      arch/riscv/include/asm/atomic.h
  34. 0 23
      arch/riscv/include/asm/barrier.h
  35. 1 1
      arch/riscv/include/asm/bitops.h
  36. 3 3
      arch/riscv/include/asm/bug.h
  37. 26 4
      arch/riscv/include/asm/cacheflush.h
  38. 10 8
      arch/riscv/include/asm/io.h
  39. 4 0
      arch/riscv/include/asm/mmu.h
  40. 45 0
      arch/riscv/include/asm/mmu_context.h
  41. 32 26
      arch/riscv/include/asm/pgtable.h
  42. 1 10
      arch/riscv/include/asm/spinlock.h
  43. 2 1
      arch/riscv/include/asm/timex.h
  44. 6 1
      arch/riscv/include/asm/tlbflush.h
  45. 28 0
      arch/riscv/include/asm/vdso-syscalls.h
  46. 4 0
      arch/riscv/include/asm/vdso.h
  47. 0 3
      arch/riscv/kernel/head.S
  48. 3 0
      arch/riscv/kernel/riscv_ksyms.c
  49. 5 0
      arch/riscv/kernel/setup.c
  50. 55 0
      arch/riscv/kernel/smp.c
  51. 32 1
      arch/riscv/kernel/sys_riscv.c
  52. 2 0
      arch/riscv/kernel/syscall_table.c
  53. 6 1
      arch/riscv/kernel/vdso/Makefile
  54. 26 0
      arch/riscv/kernel/vdso/clock_getres.S
  55. 26 0
      arch/riscv/kernel/vdso/clock_gettime.S
  56. 31 0
      arch/riscv/kernel/vdso/flush_icache.S
  57. 26 0
      arch/riscv/kernel/vdso/getcpu.S
  58. 26 0
      arch/riscv/kernel/vdso/gettimeofday.S
  59. 5 2
      arch/riscv/kernel/vdso/vdso.lds.S
  60. 1 0
      arch/riscv/lib/delay.c
  61. 1 0
      arch/riscv/mm/Makefile
  62. 23 0
      arch/riscv/mm/cacheflush.c
  63. 1 1
      arch/riscv/mm/ioremap.c
  64. 1 4
      arch/s390/Makefile
  65. 1 0
      arch/s390/appldata/appldata_base.c
  66. 1 0
      arch/s390/appldata/appldata_mem.c
  67. 1 0
      arch/s390/appldata/appldata_net_sum.c
  68. 1 0
      arch/s390/appldata/appldata_os.c
  69. 1 4
      arch/s390/boot/install.sh
  70. 1 6
      arch/s390/crypto/aes_s390.c
  71. 1 5
      arch/s390/crypto/arch_random.c
  72. 1 0
      arch/s390/crypto/crc32-vx.c
  73. 1 6
      arch/s390/crypto/des_s390.c
  74. 1 0
      arch/s390/crypto/ghash_s390.c
  75. 1 5
      arch/s390/crypto/paes_s390.c
  76. 1 0
      arch/s390/crypto/prng.c
  77. 1 6
      arch/s390/crypto/sha.h
  78. 1 6
      arch/s390/crypto/sha256_s390.c
  79. 1 6
      arch/s390/crypto/sha512_s390.c
  80. 1 6
      arch/s390/crypto/sha_common.c
  81. 1 1
      arch/s390/hypfs/inode.c
  82. 1 4
      arch/s390/include/asm/cpu_mf.h
  83. 8 7
      arch/s390/include/asm/elf.h
  84. 1 14
      arch/s390/include/asm/kprobes.h
  85. 1 4
      arch/s390/include/asm/kvm_host.h
  86. 1 6
      arch/s390/include/asm/kvm_para.h
  87. 1 7
      arch/s390/include/asm/livepatch.h
  88. 1 1
      arch/s390/include/asm/mmu_context.h
  89. 7 1
      arch/s390/include/asm/pgtable.h
  90. 1 4
      arch/s390/include/asm/syscall.h
  91. 1 4
      arch/s390/include/asm/sysinfo.h
  92. 1 0
      arch/s390/include/asm/topology.h
  93. 0 4
      arch/s390/include/uapi/asm/kvm.h
  94. 0 4
      arch/s390/include/uapi/asm/kvm_para.h
  95. 0 4
      arch/s390/include/uapi/asm/kvm_perf.h
  96. 0 4
      arch/s390/include/uapi/asm/virtio-ccw.h
  97. 0 14
      arch/s390/include/uapi/asm/zcrypt.h
  98. 1 1
      arch/s390/kernel/debug.c
  99. 8 2
      arch/s390/kernel/dis.c
  100. 1 0
      arch/s390/kernel/dumpstack.c

+ 4 - 0
Documentation/devicetree/bindings/hwmon/jc42.txt

@@ -34,6 +34,10 @@ Required properties:

 - reg: I2C address

+Optional properties:
+- smbus-timeout-disable: When set, the smbus timeout function will be disabled.
+			 This is not supported on all chips.
+
 Example:

 temp-sensor@1a {

+ 0 - 7
Documentation/sysctl/vm.txt

@@ -158,10 +158,6 @@ Note: the minimum value allowed for dirty_bytes is two pages (in bytes); any
 value lower than this limit will be ignored and the old configuration will be
 retained.

-Note: the value of dirty_bytes also must be set greater than
-dirty_background_bytes or the amount of memory corresponding to
-dirty_background_ratio.
-
 ==============================================================

 dirty_expire_centisecs
@@ -181,9 +177,6 @@ generating disk writes will itself start writing out dirty data.

 The total available memory is not equal to total system memory.

-Note: dirty_ratio must be set greater than dirty_background_ratio or
-ratio corresponding to dirty_background_bytes.
-
 ==============================================================

 dirty_writeback_centisecs

+ 16 - 7
MAINTAINERS

@@ -554,13 +554,13 @@ S:	Orphan
 F:	Documentation/filesystems/affs.txt
 F:	fs/affs/

-AFS FILESYSTEM & AF_RXRPC SOCKET DOMAIN
+AFS FILESYSTEM
 M:	David Howells <dhowells@redhat.com>
 L:	linux-afs@lists.infradead.org
 S:	Supported
 F:	fs/afs/
-F:	include/net/af_rxrpc.h
-F:	net/rxrpc/af_rxrpc.c
+F:	include/trace/events/afs.h
+F:	Documentation/filesystems/afs.txt
 W:	https://www.infradead.org/~dhowells/kafs/

 AGPGART DRIVER
@@ -6174,7 +6174,6 @@ M:	Jean Delvare <jdelvare@suse.com>
 M:	Guenter Roeck <linux@roeck-us.net>
 L:	linux-hwmon@vger.kernel.org
 W:	http://hwmon.wiki.kernel.org/
-T:	quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
 S:	Maintained
 F:	Documentation/hwmon/
@@ -11782,6 +11781,18 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-deve
 S:	Maintained
 F:	drivers/net/wireless/realtek/rtl8xxxu/

+RXRPC SOCKETS (AF_RXRPC)
+M:	David Howells <dhowells@redhat.com>
+L:	linux-afs@lists.infradead.org
+S:	Supported
+F:	net/rxrpc/
+F:	include/keys/rxrpc-type.h
+F:	include/net/af_rxrpc.h
+F:	include/trace/events/rxrpc.h
+F:	include/uapi/linux/rxrpc.h
+F:	Documentation/networking/rxrpc.txt
+W:	https://www.infradead.org/~dhowells/kafs/
+
 S3 SAVAGE FRAMEBUFFER DRIVER
 M:	Antonino Daplas <adaplas@gmail.com>
 L:	linux-fbdev@vger.kernel.org
@@ -13653,10 +13664,8 @@ F:	drivers/net/wireless/ti/
 F:	include/linux/wl12xx.h

 TILE ARCHITECTURE
-M:	Chris Metcalf <cmetcalf@mellanox.com>
 W:	http://www.mellanox.com/repository/solutions/tile-scm/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile.git
-S:	Supported
+S:	Orphan
 F:	arch/tile/
 F:	drivers/char/tile-srom.c
 F:	drivers/edac/tile_edac.c

+ 1 - 1
Makefile

@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Fearless Coyote

 # *DOCUMENTATION*

+ 0 - 1
arch/arm/include/asm/pgtable-3level.h

@@ -221,7 +221,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
 }
 #define	__HAVE_ARCH_PTE_SPECIAL

-#define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)		(pmd_isclear((pmd), L_PMD_SECT_RDONLY))
 #define pmd_dirty(pmd)		(pmd_isset((pmd), L_PMD_SECT_DIRTY))
 #define pud_page(pud)		pmd_page(__pmd(pud_val(pud)))

+ 2 - 2
arch/arm/kernel/entry-header.S

@@ -300,7 +300,7 @@
 	mov	r2, sp
 	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
-	tst	r1, #0xcf
+	tst	r1, #PSR_I_BIT | 0x0f
 	bne	1f
 	msr	spsr_cxsf, r1			@ save in spsr_svc
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
@@ -332,7 +332,7 @@
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [sp, #\offset + S_PC]	@ get pc
 	add	sp, sp, #\offset + S_SP
-	tst	r1, #0xcf
+	tst	r1, #PSR_I_BIT | 0x0f
 	bne	1f
 	msr	spsr_cxsf, r1			@ save in spsr_svc


+ 0 - 3
arch/arm64/Makefile

@@ -83,9 +83,6 @@ endif

 ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
 KBUILD_LDFLAGS_MODULE	+= -T $(srctree)/arch/arm64/kernel/module.lds
-ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
-KBUILD_LDFLAGS_MODULE	+= $(objtree)/arch/arm64/kernel/ftrace-mod.o
-endif
 endif

 # Default value

+ 1 - 1
arch/arm64/include/asm/cacheflush.h

@@ -38,7 +38,7 @@
  *
  *	See Documentation/cachetlb.txt for more information. Please note that
  *	the implementation assumes non-aliasing VIPT D-cache and (aliasing)
- *	VIPT or ASID-tagged VIVT I-cache.
+ *	VIPT I-cache.
  *
  *	flush_cache_mm(mm)
  *

+ 45 - 1
arch/arm64/include/asm/module.h

@@ -32,7 +32,7 @@ struct mod_arch_specific {
 	struct mod_plt_sec	init;

 	/* for CONFIG_DYNAMIC_FTRACE */
-	void			*ftrace_trampoline;
+	struct plt_entry 	*ftrace_trampoline;
 };
 #endif

@@ -45,4 +45,48 @@ extern u64 module_alloc_base;
 #define module_alloc_base	((u64)_etext - MODULES_VSIZE)
 #endif

+struct plt_entry {
+	/*
+	 * A program that conforms to the AArch64 Procedure Call Standard
+	 * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
+	 * IP1 (x17) may be inserted at any branch instruction that is
+	 * exposed to a relocation that supports long branches. Since that
+	 * is exactly what we are dealing with here, we are free to use x16
+	 * as a scratch register in the PLT veneers.
+	 */
+	__le32	mov0;	/* movn	x16, #0x....			*/
+	__le32	mov1;	/* movk	x16, #0x...., lsl #16		*/
+	__le32	mov2;	/* movk	x16, #0x...., lsl #32		*/
+	__le32	br;	/* br	x16				*/
+};
+
+static inline struct plt_entry get_plt_entry(u64 val)
+{
+	/*
+	 * MOVK/MOVN/MOVZ opcode:
+	 * +--------+------------+--------+-----------+-------------+---------+
+	 * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
+	 * +--------+------------+--------+-----------+-------------+---------+
+	 *
+	 * Rd     := 0x10 (x16)
+	 * hw     := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
+	 * opc    := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
+	 * sf     := 1 (64-bit variant)
+	 */
+	return (struct plt_entry){
+		cpu_to_le32(0x92800010 | (((~val      ) & 0xffff)) << 5),
+		cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
+		cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
+		cpu_to_le32(0xd61f0200)
+	};
+}
+
+static inline bool plt_entries_equal(const struct plt_entry *a,
+				     const struct plt_entry *b)
+{
+	return a->mov0 == b->mov0 &&
+	       a->mov1 == b->mov1 &&
+	       a->mov2 == b->mov2;
+}
+
 #endif /* __ASM_MODULE_H */
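
As a worked illustration of the encoding described in the comment above (using a
hypothetical kernel address, not one taken from this patch): for
val = 0xffff000008123456, get_plt_entry() emits a veneer equivalent to

    movn	x16, #0xcba9              // x16 = 0xffffffffffff3456 (~0xcba9)
    movk	x16, #0x0812, lsl #16     // x16 = 0xffffffff08123456
    movk	x16, #0x0000, lsl #32     // x16 = 0xffff000008123456
    br	x16

The top 16 bits are never written and stay all-ones, which is enough because kernel
module and text addresses live in the upper, all-ones-prefixed half of the address
space.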

+ 0 - 1
arch/arm64/include/asm/pgtable.h

@@ -345,7 +345,6 @@ static inline int pmd_protnone(pmd_t pmd)

 #define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

-#define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)		pte_write(pmd_pte(pmd))

 #define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

+ 0 - 3
arch/arm64/kernel/Makefile

@@ -61,6 +61,3 @@ extra-y					+= $(head-y) vmlinux.lds
 ifeq ($(CONFIG_DEBUG_EFI),y)
 AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\""
 endif
-
-# will be included by each individual module but not by the core kernel itself
-extra-$(CONFIG_DYNAMIC_FTRACE) += ftrace-mod.o

+ 3 - 3
arch/arm64/kernel/cpu_ops.c

@@ -31,13 +31,13 @@ extern const struct cpu_operations cpu_psci_ops;

 const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;

-static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = {
+static const struct cpu_operations *const dt_supported_cpu_ops[] __initconst = {
 	&smp_spin_table_ops,
 	&cpu_psci_ops,
 	NULL,
 };

-static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = {
+static const struct cpu_operations *const acpi_supported_cpu_ops[] __initconst = {
 #ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
 	&acpi_parking_protocol_ops,
 #endif
@@ -47,7 +47,7 @@ static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = {

 static const struct cpu_operations * __init cpu_get_ops(const char *name)
 {
-	const struct cpu_operations **ops;
+	const struct cpu_operations *const *ops;

 	ops = acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops;


+ 3 - 3
arch/arm64/kernel/fpsimd.c

@@ -1026,10 +1026,10 @@ void fpsimd_update_current_state(struct fpsimd_state *state)

 	local_bh_disable();

-	if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
-		current->thread.fpsimd_state = *state;
+	current->thread.fpsimd_state = *state;
+	if (system_supports_sve() && test_thread_flag(TIF_SVE))
 		fpsimd_to_sve(current);
 		fpsimd_to_sve(current);
-	}
+
 	task_fpsimd_load();

 	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {

+ 0 - 18
arch/arm64/kernel/ftrace-mod.S

@@ -1,18 +0,0 @@
-/*
- * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
-	.section	".text.ftrace_trampoline", "ax"
-	.align		3
-0:	.quad		0
-__ftrace_trampoline:
-	ldr		x16, 0b
-	br		x16
-ENDPROC(__ftrace_trampoline)

+ 8 - 6
arch/arm64/kernel/ftrace.c

@@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)

 	if (offset < -SZ_128M || offset >= SZ_128M) {
 #ifdef CONFIG_ARM64_MODULE_PLTS
-		unsigned long *trampoline;
+		struct plt_entry trampoline;
 		struct module *mod;

 		/*
@@ -104,22 +104,24 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		 * is added in the future, but for now, the pr_err() below
 		 * deals with a theoretical issue only.
 		 */
-		trampoline = (unsigned long *)mod->arch.ftrace_trampoline;
-		if (trampoline[0] != addr) {
-			if (trampoline[0] != 0) {
+		trampoline = get_plt_entry(addr);
+		if (!plt_entries_equal(mod->arch.ftrace_trampoline,
+				       &trampoline)) {
+			if (!plt_entries_equal(mod->arch.ftrace_trampoline,
+					       &(struct plt_entry){})) {
 				pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
 				return -EINVAL;
 			}

 			/* point the trampoline to our ftrace entry point */
 			module_disable_ro(mod);
-			trampoline[0] = addr;
+			*mod->arch.ftrace_trampoline = trampoline;
 			module_enable_ro(mod, true);

 			/* update trampoline before patching in the branch */
 			smp_wmb();
 		}
-		addr = (unsigned long)&trampoline[1];
+		addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
 #else /* CONFIG_ARM64_MODULE_PLTS */
 		return -EINVAL;
 #endif /* CONFIG_ARM64_MODULE_PLTS */

+ 14 - 36
arch/arm64/kernel/module-plts.c

@@ -11,21 +11,6 @@
 #include <linux/module.h>
 #include <linux/sort.h>

-struct plt_entry {
-	/*
-	 * A program that conforms to the AArch64 Procedure Call Standard
-	 * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
-	 * IP1 (x17) may be inserted at any branch instruction that is
-	 * exposed to a relocation that supports long branches. Since that
-	 * is exactly what we are dealing with here, we are free to use x16
-	 * as a scratch register in the PLT veneers.
-	 */
-	__le32	mov0;	/* movn	x16, #0x....			*/
-	__le32	mov1;	/* movk	x16, #0x...., lsl #16		*/
-	__le32	mov2;	/* movk	x16, #0x...., lsl #32		*/
-	__le32	br;	/* br	x16				*/
-};
-
 static bool in_init(const struct module *mod, void *loc)
 {
 	return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
@@ -40,33 +25,14 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
 	int i = pltsec->plt_num_entries;
 	u64 val = sym->st_value + rela->r_addend;

-	/*
-	 * MOVK/MOVN/MOVZ opcode:
-	 * +--------+------------+--------+-----------+-------------+---------+
-	 * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
-	 * +--------+------------+--------+-----------+-------------+---------+
-	 *
-	 * Rd     := 0x10 (x16)
-	 * hw     := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
-	 * opc    := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
-	 * sf     := 1 (64-bit variant)
-	 */
-	plt[i] = (struct plt_entry){
-		cpu_to_le32(0x92800010 | (((~val      ) & 0xffff)) << 5),
-		cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
-		cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
-		cpu_to_le32(0xd61f0200)
-	};
+	plt[i] = get_plt_entry(val);

 	/*
 	 * Check if the entry we just created is a duplicate. Given that the
 	 * relocations are sorted, this will be the last entry we allocated.
 	 * (if one exists).
 	 */
-	if (i > 0 &&
-	    plt[i].mov0 == plt[i - 1].mov0 &&
-	    plt[i].mov1 == plt[i - 1].mov1 &&
-	    plt[i].mov2 == plt[i - 1].mov2)
+	if (i > 0 && plt_entries_equal(plt + i, plt + i - 1))
 		return (u64)&plt[i - 1];

 	pltsec->plt_num_entries++;
@@ -154,6 +120,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 	unsigned long core_plts = 0;
 	unsigned long init_plts = 0;
 	Elf64_Sym *syms = NULL;
+	Elf_Shdr *tramp = NULL;
 	int i;

 	/*
@@ -165,6 +132,10 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 			mod->arch.core.plt = sechdrs + i;
 		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
 			mod->arch.init.plt = sechdrs + i;
+		else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
+			 !strcmp(secstrings + sechdrs[i].sh_name,
+				 ".text.ftrace_trampoline"))
+			tramp = sechdrs + i;
 		else if (sechdrs[i].sh_type == SHT_SYMTAB)
 			syms = (Elf64_Sym *)sechdrs[i].sh_addr;
 	}
@@ -215,5 +186,12 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 	mod->arch.init.plt_num_entries = 0;
 	mod->arch.init.plt_max_entries = init_plts;

+	if (tramp) {
+		tramp->sh_type = SHT_NOBITS;
+		tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+		tramp->sh_addralign = __alignof__(struct plt_entry);
+		tramp->sh_size = sizeof(struct plt_entry);
+	}
+
 	return 0;
 }

+ 1 - 0
arch/arm64/kernel/module.lds

@@ -1,4 +1,5 @@
 SECTIONS {
 	.plt (NOLOAD) : { BYTE(0) }
 	.init.plt (NOLOAD) : { BYTE(0) }
+	.text.ftrace_trampoline (NOLOAD) : { BYTE(0) }
 }

+ 0 - 6
arch/arm64/kernel/perf_event.c

@@ -262,12 +262,6 @@ static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]

 	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
 	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
-
-	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
-	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
-
-	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
-	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
 };

 static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]

+ 16 - 12
arch/arm64/mm/context.c

@@ -96,12 +96,6 @@ static void flush_context(unsigned int cpu)

 	set_reserved_asid_bits();

-	/*
-	 * Ensure the generation bump is observed before we xchg the
-	 * active_asids.
-	 */
-	smp_wmb();
-
 	for_each_possible_cpu(i) {
 		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
 		/*
@@ -117,7 +111,10 @@ static void flush_context(unsigned int cpu)
 		per_cpu(reserved_asids, i) = asid;
 	}

-	/* Queue a TLB invalidate and flush the I-cache if necessary. */
+	/*
+	 * Queue a TLB invalidation for each CPU to perform on next
+	 * context-switch
+	 */
 	cpumask_setall(&tlb_flush_pending);
 }

@@ -202,11 +199,18 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 	asid = atomic64_read(&mm->context.id);

 	/*
-	 * The memory ordering here is subtle. We rely on the control
-	 * dependency between the generation read and the update of
-	 * active_asids to ensure that we are synchronised with a
-	 * parallel rollover (i.e. this pairs with the smp_wmb() in
-	 * flush_context).
+	 * The memory ordering here is subtle.
+	 * If our ASID matches the current generation, then we update
+	 * our active_asids entry with a relaxed xchg. Racing with a
+	 * concurrent rollover means that either:
+	 *
+	 * - We get a zero back from the xchg and end up waiting on the
+	 *   lock. Taking the lock synchronises with the rollover and so
+	 *   we are forced to see the updated generation.
+	 *
+	 * - We get a valid ASID back from the xchg, which means the
+	 *   relaxed xchg in flush_context will treat us as reserved
+	 *   because atomic RmWs are totally ordered for a given location.
 	 */
 	if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
 	    && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))

+ 1 - 1
arch/arm64/mm/pgd.c

@@ -26,7 +26,7 @@
 #include <asm/page.h>
 #include <asm/tlbflush.h>

-static struct kmem_cache *pgd_cache;
+static struct kmem_cache *pgd_cache __ro_after_init;

 pgd_t *pgd_alloc(struct mm_struct *mm)
 {

+ 1 - 1
arch/mips/include/asm/pgtable.h

@@ -552,7 +552,7 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		       pmd_t *pmdp, pmd_t pmd);

-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
 	return !!(pmd_val(pmd) & _PAGE_WRITE);

+ 2 - 5
arch/mips/kvm/mips.c

@@ -445,10 +445,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int r = -EINTR;
-	sigset_t sigsaved;
-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+	kvm_sigset_activate(vcpu);

 	if (vcpu->mmio_needed) {
 		if (!vcpu->mmio_is_write)
@@ -480,8 +478,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	local_irq_enable();

 out:
-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+	kvm_sigset_deactivate(vcpu);

 	return r;
 }

+ 0 - 1
arch/powerpc/include/asm/book3s/64/pgtable.h

@@ -1005,7 +1005,6 @@ static inline int pmd_protnone(pmd_t pmd)
 }
 #endif /* CONFIG_NUMA_BALANCING */

-#define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)		pte_write(pmd_pte(pmd))
 #define __pmd_write(pmd)	__pte_write(pmd_pte(pmd))
 #define pmd_savedwrite(pmd)	pte_savedwrite(pmd_pte(pmd))

+ 1 - 0
arch/powerpc/include/asm/kvm_ppc.h

@@ -180,6 +180,7 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
 		struct iommu_group *grp);
 extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
 extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
+extern void kvmppc_setup_partition_table(struct kvm *kvm);

 extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 				struct kvm_create_spapr_tce_64 *args);

+ 2 - 0
arch/powerpc/kernel/misc_64.S

@@ -623,7 +623,9 @@ BEGIN_FTR_SECTION
 	 * NOTE, we rely on r0 being 0 from above.
 	 */
 	mtspr	SPRN_IAMR,r0
+BEGIN_FTR_SECTION_NESTED(42)
 	mtspr	SPRN_AMOR,r0
+END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

 	/* save regs for local vars on new stack.

+ 9 - 3
arch/powerpc/kernel/process.c

@@ -1569,16 +1569,22 @@ void arch_release_task_struct(struct task_struct *t)
  */
 int set_thread_tidr(struct task_struct *t)
 {
+	int rc;
+
 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
 		return -EINVAL;

 	if (t != current)
 		return -EINVAL;

-	t->thread.tidr = assign_thread_tidr();
-	if (t->thread.tidr < 0)
-		return t->thread.tidr;
+	if (t->thread.tidr)
+		return 0;
+
+	rc = assign_thread_tidr();
+	if (rc < 0)
+		return rc;
+	t->thread.tidr = rc;
 	mtspr(SPRN_TIDR, t->thread.tidr);

 	return 0;

+ 23 - 14
arch/powerpc/kvm/book3s_64_mmu_hv.c

@@ -1238,8 +1238,9 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
 	unsigned long vpte, rpte, guest_rpte;
 	int ret;
 	struct revmap_entry *rev;
-	unsigned long apsize, psize, avpn, pteg, hash;
+	unsigned long apsize, avpn, pteg, hash;
 	unsigned long new_idx, new_pteg, replace_vpte;
+	int pshift;

 	hptep = (__be64 *)(old->virt + (idx << 4));

@@ -1298,8 +1299,8 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
 		goto out;

 	rpte = be64_to_cpu(hptep[1]);
-	psize = hpte_base_page_size(vpte, rpte);
-	avpn = HPTE_V_AVPN_VAL(vpte) & ~((psize - 1) >> 23);
+	pshift = kvmppc_hpte_base_page_shift(vpte, rpte);
+	avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23);
 	pteg = idx / HPTES_PER_GROUP;
 	if (vpte & HPTE_V_SECONDARY)
 		pteg = ~pteg;
@@ -1311,20 +1312,20 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
 		offset = (avpn & 0x1f) << 23;
 		vsid = avpn >> 5;
 		/* We can find more bits from the pteg value */
-		if (psize < (1ULL << 23))
-			offset |= ((vsid ^ pteg) & old_hash_mask) * psize;
+		if (pshift < 23)
+			offset |= ((vsid ^ pteg) & old_hash_mask) << pshift;
-		hash = vsid ^ (offset / psize);
+		hash = vsid ^ (offset >> pshift);
 	} else {
 		unsigned long offset, vsid;

 		/* We only have 40 - 23 bits of seg_off in avpn */
 		offset = (avpn & 0x1ffff) << 23;
 		vsid = avpn >> 17;
-		if (psize < (1ULL << 23))
-			offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) * psize;
+		if (pshift < 23)
+			offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) << pshift;
-		hash = vsid ^ (vsid << 25) ^ (offset / psize);
+		hash = vsid ^ (vsid << 25) ^ (offset >> pshift);
 	}

 	new_pteg = hash & new_hash_mask;
@@ -1801,6 +1802,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 	ssize_t nb;
 	long int err, ret;
 	int mmu_ready;
+	int pshift;

 	if (!access_ok(VERIFY_READ, buf, count))
 		return -EFAULT;
@@ -1855,6 +1857,9 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 			err = -EINVAL;
 			if (!(v & HPTE_V_VALID))
 				goto out;
+			pshift = kvmppc_hpte_base_page_shift(v, r);
+			if (pshift <= 0)
+				goto out;
 			lbuf += 2;
 			nb += HPTE_SIZE;

@@ -1869,14 +1874,18 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 				goto out;
 			}
 			if (!mmu_ready && is_vrma_hpte(v)) {
-				unsigned long psize = hpte_base_page_size(v, r);
-				unsigned long senc = slb_pgsize_encoding(psize);
-				unsigned long lpcr;
+				unsigned long senc, lpcr;
+				senc = slb_pgsize_encoding(1ul << pshift);
 				kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
 					(VRMA_VSID << SLB_VSID_SHIFT_1T);
-				lpcr = senc << (LPCR_VRMASD_SH - 4);
-				kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
+				if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+					lpcr = senc << (LPCR_VRMASD_SH - 4);
+					kvmppc_update_lpcr(kvm, lpcr,
+							   LPCR_VRMASD);
+				} else {
+					kvmppc_setup_partition_table(kvm);
+				}
 				mmu_ready = 1;
 			}
 			++i;

+ 1 - 2
arch/powerpc/kvm/book3s_hv.c

@@ -120,7 +120,6 @@ MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");

 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
-static void kvmppc_setup_partition_table(struct kvm *kvm);

 static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
 		int *ip)
@@ -3574,7 +3573,7 @@ static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
 	return;
 }

-static void kvmppc_setup_partition_table(struct kvm *kvm)
+void kvmppc_setup_partition_table(struct kvm *kvm)
 {
 	unsigned long dw0, dw1;


+ 2 - 5
arch/powerpc/kvm/powerpc.c

@@ -1407,7 +1407,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int r;
-	sigset_t sigsaved;

 	if (vcpu->mmio_needed) {
 		vcpu->mmio_needed = 0;
@@ -1448,16 +1447,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 #endif
 	}

-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+	kvm_sigset_activate(vcpu);

 	if (run->immediate_exit)
 		r = -EINTR;
 	else
 		r = kvmppc_vcpu_run(run, vcpu);

-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+	kvm_sigset_deactivate(vcpu);

 	return r;
 }

+ 12 - 3
arch/powerpc/mm/hash_native_64.c

@@ -47,7 +47,8 @@

 DEFINE_RAW_SPINLOCK(native_tlbie_lock);

-static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
+static inline unsigned long  ___tlbie(unsigned long vpn, int psize,
+						int apsize, int ssize)
 {
 	unsigned long va;
 	unsigned int penc;
@@ -100,7 +101,15 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
 			     : "memory");
 		break;
 	}
-	trace_tlbie(0, 0, va, 0, 0, 0, 0);
+	return va;
+}
+
+static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
+{
+	unsigned long rb;
+
+	rb = ___tlbie(vpn, psize, apsize, ssize);
+	trace_tlbie(0, 0, rb, 0, 0, 0, 0);
 }

 static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
@@ -652,7 +661,7 @@ static void native_hpte_clear(void)
 		if (hpte_v & HPTE_V_VALID) {
 			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
 			hptep->v = 0;
-			__tlbie(vpn, psize, apsize, ssize);
+			___tlbie(vpn, psize, apsize, ssize);
 		}
 	}


+ 1 - 0
arch/riscv/include/asm/Kbuild

@@ -40,6 +40,7 @@ generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sections.h
 generic-y += sembuf.h
+generic-y += serial.h
 generic-y += setup.h
 generic-y += shmbuf.h
 generic-y += shmparam.h

+ 6 - 6
arch/riscv/include/asm/asm.h

@@ -58,17 +58,17 @@
 #endif

 #if (__SIZEOF_INT__ == 4)
-#define INT		__ASM_STR(.word)
-#define SZINT		__ASM_STR(4)
-#define LGINT		__ASM_STR(2)
+#define RISCV_INT		__ASM_STR(.word)
+#define RISCV_SZINT		__ASM_STR(4)
+#define RISCV_LGINT		__ASM_STR(2)
 #else
 #error "Unexpected __SIZEOF_INT__"
 #endif

 #if (__SIZEOF_SHORT__ == 2)
-#define SHORT		__ASM_STR(.half)
-#define SZSHORT		__ASM_STR(2)
-#define LGSHORT		__ASM_STR(1)
+#define RISCV_SHORT		__ASM_STR(.half)
+#define RISCV_SZSHORT		__ASM_STR(2)
+#define RISCV_LGSHORT		__ASM_STR(1)
 #else
 #error "Unexpected __SIZEOF_SHORT__"
 #endif

+ 54 - 49
arch/riscv/include/asm/atomic.h

@@ -50,30 +50,30 @@ static __always_inline void atomic64_set(atomic64_t *v, long i)
  * have the AQ or RL bits set.  These don't return anything, so there's only
  * one version to worry about.
  */
-#define ATOMIC_OP(op, asm_op, c_op, I, asm_type, c_type, prefix)				\
-static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)		\
-{												\
-	__asm__ __volatile__ (									\
-		"amo" #asm_op "." #asm_type " zero, %1, %0"					\
-		: "+A" (v->counter)								\
-		: "r" (I)									\
-		: "memory");									\
+#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)				\
+static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)	\
+{											\
+	__asm__ __volatile__ (								\
+		"amo" #asm_op "." #asm_type " zero, %1, %0"				\
+		: "+A" (v->counter)							\
+		: "r" (I)								\
+		: "memory");								\
 }

 #ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, asm_op, c_op, I)			\
-        ATOMIC_OP (op, asm_op, c_op, I, w,  int,   )
+#define ATOMIC_OPS(op, asm_op, I)			\
+        ATOMIC_OP (op, asm_op, I, w,  int,   )
 #else
-#define ATOMIC_OPS(op, asm_op, c_op, I)			\
-        ATOMIC_OP (op, asm_op, c_op, I, w,  int,   )	\
-        ATOMIC_OP (op, asm_op, c_op, I, d, long, 64)
+#define ATOMIC_OPS(op, asm_op, I)			\
+        ATOMIC_OP (op, asm_op, I, w,  int,   )	\
+        ATOMIC_OP (op, asm_op, I, d, long, 64)
 #endif

-ATOMIC_OPS(add, add, +,  i)
-ATOMIC_OPS(sub, add, +, -i)
-ATOMIC_OPS(and, and, &,  i)
-ATOMIC_OPS( or,  or, |,  i)
-ATOMIC_OPS(xor, xor, ^,  i)
+ATOMIC_OPS(add, add,  i)
+ATOMIC_OPS(sub, add, -i)
+ATOMIC_OPS(and, and,  i)
+ATOMIC_OPS( or,  or,  i)
+ATOMIC_OPS(xor, xor,  i)

 #undef ATOMIC_OP
 #undef ATOMIC_OPS
@@ -83,7 +83,7 @@ ATOMIC_OPS(xor, xor, ^,  i)
  * There's two flavors of these: the arithmatic ops have both fetch and return
  * versions, while the logical ops only have fetch versions.
  */
-#define ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix)			\
+#define ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, asm_type, c_type, prefix)				\
 static __always_inline c_type atomic##prefix##_fetch_##op##c_or(c_type i, atomic##prefix##_t *v)	\
 {													\
 	register c_type ret;										\
@@ -103,13 +103,13 @@ static __always_inline c_type atomic##prefix##_##op##_return##c_or(c_type i, ato

 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)				\
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w,  int,   )	\
+        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, w,  int,   )	\
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)				\
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w,  int,   )	\
+        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, w,  int,   )	\
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )	\
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, d, long, 64)	\
+        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, d, long, 64)	\
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
 #endif

@@ -126,28 +126,28 @@ ATOMIC_OPS(sub, add, +, -i, .aqrl,         )
 #undef ATOMIC_OPS

 #ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)				\
-        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )
+#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or)				\
+        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w,  int,   )
 #else
-#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)				\
-        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )		\
-        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
+#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or)				\
+        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w,  int,   )	\
+        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, d, long, 64)
 #endif

-ATOMIC_OPS(and, and, &,  i,      , _relaxed)
-ATOMIC_OPS(and, and, &,  i, .aq  , _acquire)
-ATOMIC_OPS(and, and, &,  i, .rl  , _release)
-ATOMIC_OPS(and, and, &,  i, .aqrl,         )
+ATOMIC_OPS(and, and, i,      , _relaxed)
+ATOMIC_OPS(and, and, i, .aq  , _acquire)
+ATOMIC_OPS(and, and, i, .rl  , _release)
+ATOMIC_OPS(and, and, i, .aqrl,         )
-ATOMIC_OPS( or,  or, |,  i,      , _relaxed)
-ATOMIC_OPS( or,  or, |,  i, .aq  , _acquire)
-ATOMIC_OPS( or,  or, |,  i, .rl  , _release)
-ATOMIC_OPS( or,  or, |,  i, .aqrl,         )
+ATOMIC_OPS( or,  or, i,      , _relaxed)
+ATOMIC_OPS( or,  or, i, .aq  , _acquire)
+ATOMIC_OPS( or,  or, i, .rl  , _release)
+ATOMIC_OPS( or,  or, i, .aqrl,         )
-ATOMIC_OPS(xor, xor, ^,  i,      , _relaxed)
-ATOMIC_OPS(xor, xor, ^,  i, .aq  , _acquire)
-ATOMIC_OPS(xor, xor, ^,  i, .rl  , _release)
-ATOMIC_OPS(xor, xor, ^,  i, .aqrl,         )
+ATOMIC_OPS(xor, xor, i,      , _relaxed)
+ATOMIC_OPS(xor, xor, i, .aq  , _acquire)
+ATOMIC_OPS(xor, xor, i, .rl  , _release)
+ATOMIC_OPS(xor, xor, i, .aqrl,         )

 #undef ATOMIC_OPS

@@ -182,13 +182,13 @@ ATOMIC_OPS(add_negative, add,  <, 0)
 #undef ATOMIC_OP
 #undef ATOMIC_OPS

-#define ATOMIC_OP(op, func_op, c_op, I, c_type, prefix)				\
+#define ATOMIC_OP(op, func_op, I, c_type, prefix)				\
 static __always_inline void atomic##prefix##_##op(atomic##prefix##_t *v)	\
 {										\
 	atomic##prefix##_##func_op(I, v);					\
 }

-#define ATOMIC_FETCH_OP(op, func_op, c_op, I, c_type, prefix)				\
+#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix)					\
 static __always_inline c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v)	\
 {											\
 	return atomic##prefix##_fetch_##func_op(I, v);					\
@@ -202,16 +202,16 @@ static __always_inline c_type atomic##prefix##_##op##_return(atomic##prefix##_t

 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, c_op, I)						\
-        ATOMIC_OP       (op, asm_op, c_op, I,  int,   )				\
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I,  int,   )				\
+        ATOMIC_OP       (op, asm_op,       I,  int,   )				\
+        ATOMIC_FETCH_OP (op, asm_op,       I,  int,   )				\
         ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, c_op, I)						\
-        ATOMIC_OP       (op, asm_op, c_op, I,  int,   )				\
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I,  int,   )				\
+        ATOMIC_OP       (op, asm_op,       I,  int,   )				\
+        ATOMIC_FETCH_OP (op, asm_op,       I,  int,   )				\
         ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )				\
-        ATOMIC_OP       (op, asm_op, c_op, I, long, 64)				\
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, long, 64)				\
+        ATOMIC_OP       (op, asm_op,       I, long, 64)				\
+        ATOMIC_FETCH_OP (op, asm_op,       I, long, 64)				\
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
 #endif

@@ -300,8 +300,13 @@ static __always_inline long atomic64_inc_not_zero(atomic64_t *v)

 /*
  * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
- * {cmp,}xchg and the operations that return, so they need a barrier.  We just
- * use the other implementations directly.
+ * {cmp,}xchg and the operations that return, so they need a barrier.
+ */
+/*
+ * FIXME: atomic_cmpxchg_{acquire,release,relaxed} are all implemented by
+ * assigning the same barrier to both the LR and SC operations, but that might
+ * not make any sense.  We're waiting on a memory model specification to
+ * determine exactly what the right thing to do is here.
  */
 #define ATOMIC_OP(c_t, prefix, c_or, size, asm_or)						\
 static __always_inline c_t atomic##prefix##_cmpxchg##c_or(atomic##prefix##_t *v, c_t o, c_t n) 	\

+ 0 - 23
arch/riscv/include/asm/barrier.h

@@ -38,29 +38,6 @@
 #define smp_rmb()	RISCV_FENCE(r,r)
 #define smp_wmb()	RISCV_FENCE(w,w)

-/*
- * These fences exist to enforce ordering around the relaxed AMOs.  The
- * documentation defines that
- * "
- *     atomic_fetch_add();
- *   is equivalent to:
- *     smp_mb__before_atomic();
- *     atomic_fetch_add_relaxed();
- *     smp_mb__after_atomic();
- * "
- * So we emit full fences on both sides.
- */
-#define __smb_mb__before_atomic()	smp_mb()
-#define __smb_mb__after_atomic()	smp_mb()
-
-/*
- * These barriers prevent accesses performed outside a spinlock from being moved
- * inside a spinlock.  Since RISC-V sets the aq/rl bits on our spinlock only
- * enforce release consistency, we need full fences here.
- */
-#define smb_mb__before_spinlock()	smp_mb()
-#define smb_mb__after_spinlock()	smp_mb()
-
 #include <asm-generic/barrier.h>

 #endif /* __ASSEMBLY__ */

+ 1 - 1
arch/riscv/include/asm/bitops.h

@@ -67,7 +67,7 @@
 		: "memory");

 #define __test_and_op_bit(op, mod, nr, addr) 			\
-	__test_and_op_bit_ord(op, mod, nr, addr, )
+	__test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
 #define __op_bit(op, mod, nr, addr)				\
 	__op_bit_ord(op, mod, nr, addr, )


+ 3 - 3
arch/riscv/include/asm/bug.h

@@ -27,8 +27,8 @@
 typedef u32 bug_insn_t;

 #ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
-#define __BUG_ENTRY_ADDR	INT " 1b - 2b"
-#define __BUG_ENTRY_FILE	INT " %0 - 2b"
+#define __BUG_ENTRY_ADDR	RISCV_INT " 1b - 2b"
+#define __BUG_ENTRY_FILE	RISCV_INT " %0 - 2b"
 #else
 #define __BUG_ENTRY_ADDR	RISCV_PTR " 1b"
 #define __BUG_ENTRY_FILE	RISCV_PTR " %0"
@@ -38,7 +38,7 @@ typedef u32 bug_insn_t;
 #define __BUG_ENTRY			\
 	__BUG_ENTRY_ADDR "\n\t"		\
 	__BUG_ENTRY_FILE "\n\t"		\
-	SHORT " %1"
+	RISCV_SHORT " %1"
 #else
 #define __BUG_ENTRY			\
 	__BUG_ENTRY_ADDR

+ 26 - 4
arch/riscv/include/asm/cacheflush.h

@@ -18,22 +18,44 @@

 #undef flush_icache_range
 #undef flush_icache_user_range
+#undef flush_dcache_page

 static inline void local_flush_icache_all(void)
 {
 	asm volatile ("fence.i" ::: "memory");
 }

+#define PG_dcache_clean PG_arch_1
+
+static inline void flush_dcache_page(struct page *page)
+{
+	if (test_bit(PG_dcache_clean, &page->flags))
+		clear_bit(PG_dcache_clean, &page->flags);
+}
+
+/*
+ * RISC-V doesn't have an instruction to flush parts of the instruction cache,
+ * so instead we just flush the whole thing.
+ */
+#define flush_icache_range(start, end) flush_icache_all()
+#define flush_icache_user_range(vma, pg, addr, len) flush_icache_all()
+
 #ifndef CONFIG_SMP

-#define flush_icache_range(start, end) local_flush_icache_all()
-#define flush_icache_user_range(vma, pg, addr, len) local_flush_icache_all()
+#define flush_icache_all() local_flush_icache_all()
+#define flush_icache_mm(mm, local) flush_icache_all()

 #else /* CONFIG_SMP */

-#define flush_icache_range(start, end) sbi_remote_fence_i(0)
-#define flush_icache_user_range(vma, pg, addr, len) sbi_remote_fence_i(0)
+#define flush_icache_all() sbi_remote_fence_i(0)
+void flush_icache_mm(struct mm_struct *mm, bool local);

 #endif /* CONFIG_SMP */

+/*
+ * Bits in sys_riscv_flush_icache()'s flags argument.
+ */
+#define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
+#define SYS_RISCV_FLUSH_ICACHE_ALL   (SYS_RISCV_FLUSH_ICACHE_LOCAL)
+
 #endif /* _ASM_RISCV_CACHEFLUSH_H */
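
A minimal usage sketch of the interface added above (the caller and file are
hypothetical, not part of this patch): after writing instructions into pages
belonging to an mm, a kernel user would call flush_icache_mm() so that every hart
running that mm resynchronises its instruction cache before executing the new code.

    #include <linux/mm_types.h>
    #include <linux/string.h>
    #include <asm/cacheflush.h>

    /* Illustrative only: copy new instructions, then publish them to all harts. */
    static void example_patch_and_flush(struct mm_struct *mm,
    				    void *dst, const void *src, size_t len)
    {
    	memcpy(dst, src, len);		/* store the new instruction bytes */
    	flush_icache_mm(mm, false);	/* false: do not restrict the flush to the local hart */
    }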

+ 10 - 8
arch/riscv/include/asm/io.h

@@ -19,6 +19,8 @@
 #ifndef _ASM_RISCV_IO_H
 #define _ASM_RISCV_IO_H

+#include <linux/types.h>
+
 #ifdef CONFIG_MMU

 extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
@@ -32,7 +34,7 @@ extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 #define ioremap_wc(addr, size) ioremap((addr), (size))
 #define ioremap_wt(addr, size) ioremap((addr), (size))

-extern void iounmap(void __iomem *addr);
+extern void iounmap(volatile void __iomem *addr);

 #endif /* CONFIG_MMU */

@@ -250,7 +252,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 			const ctype *buf = buffer;				\
 										\
 			do {							\
-				__raw_writeq(*buf++, addr);			\
+				__raw_write ## len(*buf++, addr);		\
 			} while (--count);					\
 		}								\
 		afence;								\
@@ -266,9 +268,9 @@ __io_reads_ins(reads, u32, l, __io_br(), __io_ar())
 __io_reads_ins(ins,  u8, b, __io_pbr(), __io_par())
 __io_reads_ins(ins, u16, w, __io_pbr(), __io_par())
 __io_reads_ins(ins, u32, l, __io_pbr(), __io_par())
-#define insb(addr, buffer, count) __insb((void __iomem *)addr, buffer, count)
-#define insw(addr, buffer, count) __insw((void __iomem *)addr, buffer, count)
-#define insl(addr, buffer, count) __insl((void __iomem *)addr, buffer, count)
+#define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count)
+#define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count)
+#define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count)

 __io_writes_outs(writes,  u8, b, __io_bw(), __io_aw())
 __io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
@@ -280,9 +282,9 @@ __io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
 __io_writes_outs(outs,  u8, b, __io_pbw(), __io_paw())
 __io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
 __io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
-#define outsb(addr, buffer, count) __outsb((void __iomem *)addr, buffer, count)
-#define outsw(addr, buffer, count) __outsw((void __iomem *)addr, buffer, count)
-#define outsl(addr, buffer, count) __outsl((void __iomem *)addr, buffer, count)
+#define outsb(addr, buffer, count) __outsb((void __iomem *)(long)addr, buffer, count)
+#define outsw(addr, buffer, count) __outsw((void __iomem *)(long)addr, buffer, count)
+#define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count)
 
 
 #ifdef CONFIG_64BIT
 __io_reads_ins(reads, u64, q, __io_br(), __io_ar())

+ 4 - 0
arch/riscv/include/asm/mmu.h

@@ -19,6 +19,10 @@
 
 
 typedef struct {
 	void *vdso;
+#ifdef CONFIG_SMP
+	/* A local icache flush is needed before user execution can resume. */
+	cpumask_t icache_stale_mask;
+#endif
 } mm_context_t;
 
 
 #endif /* __ASSEMBLY__ */

+ 45 - 0
arch/riscv/include/asm/mmu_context.h

@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
  *
  *   This program is free software; you can redistribute it and/or
  *   modify it under the terms of the GNU General Public License
@@ -14,11 +15,13 @@
 #ifndef _ASM_RISCV_MMU_CONTEXT_H
 #define _ASM_RISCV_MMU_CONTEXT_H
 
 
+#include <linux/mm_types.h>
 #include <asm-generic/mm_hooks.h>
 
 
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
 
 static inline void enter_lazy_tlb(struct mm_struct *mm,
 	struct task_struct *task)
@@ -46,12 +49,54 @@ static inline void set_pgdir(pgd_t *pgd)
 	csr_write(sptbr, virt_to_pfn(pgd) | SPTBR_MODE);
 }
 
 
+/*
+ * When necessary, performs a deferred icache flush for the given MM context,
+ * on the local CPU.  RISC-V has no direct mechanism for instruction cache
+ * shoot downs, so instead we send an IPI that informs the remote harts they
+ * need to flush their local instruction caches.  To avoid pathologically slow
+ * behavior in a common case (a bunch of single-hart processes on a many-hart
+ * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
+ * executing a MM context and instead schedule a deferred local instruction
+ * cache flush to be performed before execution resumes on each hart.  This
+ * actually performs that local instruction cache flush, which implicitly only
+ * refers to the current hart.
+ */
+static inline void flush_icache_deferred(struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+	unsigned int cpu = smp_processor_id();
+	cpumask_t *mask = &mm->context.icache_stale_mask;
+
+	if (cpumask_test_cpu(cpu, mask)) {
+		cpumask_clear_cpu(cpu, mask);
+		/*
+		 * Ensure the remote hart's writes are visible to this hart.
+		 * This pairs with a barrier in flush_icache_mm.
+		 */
+		smp_mb();
+		local_flush_icache_all();
+	}
+#endif
+}
+
 static inline void switch_mm(struct mm_struct *prev,
 	struct mm_struct *next, struct task_struct *task)
 {
 	if (likely(prev != next)) {
+		/*
+		 * Mark the current MM context as inactive, and the next as
+		 * active.  This is at least used by the icache flushing
+		 * routines in order to determine who should be flushed.
+		 */
+		unsigned int cpu = smp_processor_id();
+
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
+		cpumask_set_cpu(cpu, mm_cpumask(next));
+
 		set_pgdir(next->pgd);
 		local_flush_tlb_all();
+
+		flush_icache_deferred(next);
 	}
 }
 
 

+ 32 - 26
arch/riscv/include/asm/pgtable.h

@@ -178,28 +178,6 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
 #define pte_offset_map(dir, addr)	pte_offset_kernel((dir), (addr))
 #define pte_unmap(pte)			((void)(pte))
 
 
-/*
- * Certain architectures need to do special things when PTEs within
- * a page table are directly modified.  Thus, the following hook is
- * made available.
- */
-static inline void set_pte(pte_t *ptep, pte_t pteval)
-{
-	*ptep = pteval;
-}
-
-static inline void set_pte_at(struct mm_struct *mm,
-	unsigned long addr, pte_t *ptep, pte_t pteval)
-{
-	set_pte(ptep, pteval);
-}
-
-static inline void pte_clear(struct mm_struct *mm,
-	unsigned long addr, pte_t *ptep)
-{
-	set_pte_at(mm, addr, ptep, __pte(0));
-}
-
 static inline int pte_present(pte_t pte)
 {
 	return (pte_val(pte) & _PAGE_PRESENT);
@@ -210,21 +188,22 @@ static inline int pte_none(pte_t pte)
 	return (pte_val(pte) == 0);
 }
 
 
-/* static inline int pte_read(pte_t pte) */
-
 static inline int pte_write(pte_t pte)
 {
 	return pte_val(pte) & _PAGE_WRITE;
 }
 
 
+static inline int pte_exec(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_EXEC;
+}
+
 static inline int pte_huge(pte_t pte)
 {
 	return pte_present(pte)
 		&& (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
 }
 
 
-/* static inline int pte_exec(pte_t pte) */
-
 static inline int pte_dirty(pte_t pte)
 {
 	return pte_val(pte) & _PAGE_DIRTY;
@@ -311,6 +290,33 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
 	return pte_val(pte_a) == pte_val(pte_b);
 }
 
 
+/*
+ * Certain architectures need to do special things when PTEs within
+ * a page table are directly modified.  Thus, the following hook is
+ * made available.
+ */
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+	*ptep = pteval;
+}
+
+void flush_icache_pte(pte_t pte);
+
+static inline void set_pte_at(struct mm_struct *mm,
+	unsigned long addr, pte_t *ptep, pte_t pteval)
+{
+	if (pte_present(pteval) && pte_exec(pteval))
+		flush_icache_pte(pteval);
+
+	set_pte(ptep, pteval);
+}
+
+static inline void pte_clear(struct mm_struct *mm,
+	unsigned long addr, pte_t *ptep)
+{
+	set_pte_at(mm, addr, ptep, __pte(0));
+}
+
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 					unsigned long address, pte_t *ptep,

+ 1 - 10
arch/riscv/include/asm/spinlock.h

@@ -24,7 +24,7 @@
 
 
 /* FIXME: Replace this with a ticket lock, like MIPS. */
 
 
-#define arch_spin_is_locked(x)	((x)->lock != 0)
+#define arch_spin_is_locked(x)	(READ_ONCE((x)->lock) != 0)
 
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
@@ -58,15 +58,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	}
 }
 
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	smp_rmb();
-	do {
-		cpu_relax();
-	} while (arch_spin_is_locked(lock));
-	smp_acquire__after_ctrl_dep();
-}
-
 /***********************************************************/
 
 
 static inline void arch_read_lock(arch_rwlock_t *lock)

+ 2 - 1
arch/riscv/include/asm/timex.h

@@ -18,7 +18,7 @@
 
 
 typedef unsigned long cycles_t;
 
 
-static inline cycles_t get_cycles(void)
+static inline cycles_t get_cycles_inline(void)
 {
 	cycles_t n;
 
 
@@ -27,6 +27,7 @@ static inline cycles_t get_cycles(void)
 		: "=r" (n));
 	return n;
 }
+#define get_cycles get_cycles_inline
 
 
 #ifdef CONFIG_64BIT
 static inline uint64_t get_cycles64(void)

+ 6 - 1
arch/riscv/include/asm/tlbflush.h

@@ -17,7 +17,12 @@
 
 
 #ifdef CONFIG_MMU
 
 
-/* Flush entire local TLB */
+#include <linux/mm_types.h>
+
+/*
+ * Flush entire local TLB.  'sfence.vma' implicitly fences with the instruction
+ * cache as well, so a 'fence.i' is not necessary.
+ */
 static inline void local_flush_tlb_all(void)
 {
 	__asm__ __volatile__ ("sfence.vma" : : : "memory");

+ 28 - 0
arch/riscv/include/asm/vdso-syscalls.h

@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ASM_RISCV_VDSO_SYSCALLS_H
+#define _ASM_RISCV_VDSO_SYSCALLS_H
+
+#ifdef CONFIG_SMP
+
+/* These syscalls are only used by the vDSO and are not in the uapi. */
+#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
+__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
+
+#endif
+
+#endif /* _ASM_RISCV_VDSO_SYSCALLS_H */

+ 4 - 0
arch/riscv/include/asm/vdso.h

@@ -38,4 +38,8 @@ struct vdso_data {
 	(void __user *)((unsigned long)(base) + __vdso_##name);			\
 })
 
 
+#ifdef CONFIG_SMP
+asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+#endif
+
 #endif /* _ASM_RISCV_VDSO_H */

+ 0 - 3
arch/riscv/kernel/head.S

@@ -152,6 +152,3 @@ END(_start)
 __PAGE_ALIGNED_BSS
 	/* Empty zero page */
 	.balign PAGE_SIZE
-ENTRY(empty_zero_page)
-	.fill (empty_zero_page + PAGE_SIZE) - ., 1, 0x00
-END(empty_zero_page)

+ 3 - 0
arch/riscv/kernel/riscv_ksyms.c

@@ -12,4 +12,7 @@
 /*
  * Assembly functions that may be used (directly or indirectly) by modules
  */
+EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);

+ 5 - 0
arch/riscv/kernel/setup.c

@@ -58,7 +58,12 @@ static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
 #endif /* CONFIG_CMDLINE_BOOL */
 
 
 unsigned long va_pa_offset;
+EXPORT_SYMBOL(va_pa_offset);
 unsigned long pfn_base;
+EXPORT_SYMBOL(pfn_base);
+
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
 
 
 /* The lucky hart to first increment this variable will boot the other cores */
 atomic_t hart_lottery;

+ 55 - 0
arch/riscv/kernel/smp.c

@@ -38,6 +38,13 @@ enum ipi_message_type {
 	IPI_MAX
 };
 
 
+
+/* Unsupported */
+int setup_profiling_timer(unsigned int multiplier)
+{
+	return -EINVAL;
+}
+
 irqreturn_t handle_ipi(void)
 {
 	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
@@ -108,3 +115,51 @@ void smp_send_reschedule(int cpu)
 {
 	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
+
+/*
+ * Performs an icache flush for the given MM context.  RISC-V has no direct
+ * mechanism for instruction cache shoot downs, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
+ * IPIs for harts that are not currently executing a MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+	unsigned int cpu;
+	cpumask_t others, *mask;
+
+	preempt_disable();
+
+	/* Mark every hart's icache as needing a flush for this MM. */
+	mask = &mm->context.icache_stale_mask;
+	cpumask_setall(mask);
+	/* Flush this hart's I$ now, and mark it as flushed. */
+	cpu = smp_processor_id();
+	cpumask_clear_cpu(cpu, mask);
+	local_flush_icache_all();
+
+	/*
+	 * Flush the I$ of other harts concurrently executing, and mark them as
+	 * flushed.
+	 */
+	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+	local |= cpumask_empty(&others);
+	if (mm != current->active_mm || !local)
+		sbi_remote_fence_i(others.bits);
+	else {
+		/*
+		 * It's assumed that at least one strongly ordered operation is
+		 * performed on this hart between setting a hart's cpumask bit
+		 * and scheduling this MM context on that hart.  Sending an SBI
+		 * remote message will do this, but in the case where no
+		 * messages are sent we still need to order this hart's writes
+		 * with flush_icache_deferred().
+		 */
+		smp_mb();
+	}
+
+	preempt_enable();
+}

+ 32 - 1
arch/riscv/kernel/sys_riscv.c

@@ -14,8 +14,8 @@
  */
 
 
 #include <linux/syscalls.h>
-#include <asm/cmpxchg.h>
 #include <asm/unistd.h>
+#include <asm/cacheflush.h>
 
 
 static long riscv_sys_mmap(unsigned long addr, unsigned long len,
 			   unsigned long prot, unsigned long flags,
@@ -47,3 +47,34 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
 	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
 }
 #endif /* !CONFIG_64BIT */
+
+#ifdef CONFIG_SMP
+/*
+ * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart.  There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * sys_riscv_flush_icache() is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller.  We don't currently do anything with the address range, that's just
+ * in there for forwards compatibility.
+ */
+SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
+	uintptr_t, flags)
+{
+	struct mm_struct *mm = current->mm;
+	bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0;
+
+	/* Check the reserved flags. */
+	if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
+		return -EINVAL;
+
+	flush_icache_mm(mm, local);
+
+	return 0;
+}
+#endif
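[Editor's note: a minimal userspace sketch, not part of this commit, of how a program could invoke the new system call directly with syscall(2). The syscall number is assumed from the definition above (__NR_arch_specific_syscall, 244 in asm-generic, plus 15 = 259); real programs would normally go through the __vdso_flush_icache entry point instead.]

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_riscv_flush_icache
#define __NR_riscv_flush_icache 259	/* assumed: __NR_arch_specific_syscall (244) + 15 */
#endif

int main(void)
{
	/* start/end are currently ignored by the kernel; flags == 0 flushes for all threads. */
	long ret = syscall(__NR_riscv_flush_icache, 0UL, 0UL, 0UL);

	if (ret != 0) {
		perror("riscv_flush_icache");
		return 1;
	}
	return 0;
}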

+ 2 - 0
arch/riscv/kernel/syscall_table.c

@@ -15,6 +15,7 @@
 #include <linux/linkage.h>
 #include <linux/syscalls.h>
 #include <asm-generic/syscalls.h>
+#include <asm/vdso.h>
 
 
 #undef __SYSCALL
 #define __SYSCALL(nr, call)	[nr] = (call),
@@ -22,4 +23,5 @@
 void *sys_call_table[__NR_syscalls] = {
 	[0 ... __NR_syscalls - 1] = sys_ni_syscall,
 #include <asm/unistd.h>
+#include <asm/vdso-syscalls.h>
 };

+ 6 - 1
arch/riscv/kernel/vdso/Makefile

@@ -1,7 +1,12 @@
 # Copied from arch/tile/kernel/vdso/Makefile
 
 
 # Symbols present in the vdso
-vdso-syms = rt_sigreturn
+vdso-syms  = rt_sigreturn
+vdso-syms += gettimeofday
+vdso-syms += clock_gettime
+vdso-syms += clock_getres
+vdso-syms += getcpu
+vdso-syms += flush_icache
 
 
 # Files to link into the vdso
 obj-vdso = $(patsubst %, %.o, $(vdso-syms))

+ 26 - 0
arch/riscv/kernel/vdso/clock_getres.S

@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+	.text
+/* int __vdso_clock_getres(clockid_t clock_id, struct timespec *res); */
+ENTRY(__vdso_clock_getres)
+	.cfi_startproc
+	/* For now, just do the syscall. */
+	li a7, __NR_clock_getres
+	ecall
+	ret
+	.cfi_endproc
+ENDPROC(__vdso_clock_getres)

+ 26 - 0
arch/riscv/kernel/vdso/clock_gettime.S

@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+	.text
+/* int __vdso_clock_gettime(clockid_t clock_id, struct timespec *tp); */
+ENTRY(__vdso_clock_gettime)
+	.cfi_startproc
+	/* For now, just do the syscall. */
+	li a7, __NR_clock_gettime
+	ecall
+	ret
+	.cfi_endproc
+ENDPROC(__vdso_clock_gettime)

+ 31 - 0
arch/riscv/kernel/vdso/flush_icache.S

@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+#include <asm/vdso-syscalls.h>
+
+	.text
+/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
+ENTRY(__vdso_flush_icache)
+	.cfi_startproc
+#ifdef CONFIG_SMP
+	li a7, __NR_riscv_flush_icache
+	ecall
+#else
+	fence.i
+	li a0, 0
+#endif
+	ret
+	.cfi_endproc
+ENDPROC(__vdso_flush_icache)

+ 26 - 0
arch/riscv/kernel/vdso/getcpu.S

@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+	.text
+/* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */
+ENTRY(__vdso_getcpu)
+	.cfi_startproc
+	/* For now, just do the syscall. */
+	li a7, __NR_getcpu
+	ecall
+	ret
+	.cfi_endproc
+ENDPROC(__vdso_getcpu)

+ 26 - 0
arch/riscv/kernel/vdso/gettimeofday.S

@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+	.text
+/* int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); */
+ENTRY(__vdso_gettimeofday)
+	.cfi_startproc
+	/* For now, just do the syscall. */
+	li a7, __NR_gettimeofday
+	ecall
+	ret
+	.cfi_endproc
+ENDPROC(__vdso_gettimeofday)

+ 5 - 2
arch/riscv/kernel/vdso/vdso.lds.S

@@ -70,8 +70,11 @@ VERSION
 	LINUX_4.15 {
 	global:
 		__vdso_rt_sigreturn;
-		__vdso_cmpxchg32;
-		__vdso_cmpxchg64;
+		__vdso_gettimeofday;
+		__vdso_clock_gettime;
+		__vdso_clock_getres;
+		__vdso_getcpu;
+		__vdso_flush_icache;
 	local: *;
 	};
 }

+ 1 - 0
arch/riscv/lib/delay.c

@@ -84,6 +84,7 @@ void __delay(unsigned long cycles)
 	while ((unsigned long)(get_cycles() - t0) < cycles)
 		cpu_relax();
 }
+EXPORT_SYMBOL(__delay);
 
 
 void udelay(unsigned long usecs)
 {

+ 1 - 0
arch/riscv/mm/Makefile

@@ -2,3 +2,4 @@ obj-y += init.o
 obj-y += fault.o
 obj-y += extable.o
 obj-y += ioremap.o
+obj-y += cacheflush.o

+ 23 - 0
arch/riscv/mm/cacheflush.c

@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+void flush_icache_pte(pte_t pte)
+{
+	struct page *page = pte_page(pte);
+
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+		flush_icache_all();
+}

+ 1 - 1
arch/riscv/mm/ioremap.c

@@ -85,7 +85,7 @@ EXPORT_SYMBOL(ioremap);
  *
  * Caller must ensure there is only one unmapping for the same pointer.
  */
-void iounmap(void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
 {
 	vunmap((void *)((unsigned long)addr & PAGE_MASK));
 }

+ 1 - 4
arch/s390/Makefile

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 #
 # s390/Makefile
 # s390/Makefile
 #
 #
@@ -6,10 +7,6 @@
 # for "archclean" and "archdep" for cleaning up and making dependencies for
 # for "archclean" and "archdep" for cleaning up and making dependencies for
 # this architecture
 # this architecture
 #
 #
-# This file is subject to the terms and conditions of the GNU General Public
-# License.  See the file "COPYING" in the main directory of this archive
-# for more details.
-#
 # Copyright (C) 1994 by Linus Torvalds
 # Copyright (C) 1994 by Linus Torvalds
 #
 #
 
 

+ 1 - 0
arch/s390/appldata/appldata_base.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
  * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
  * Exports appldata_register_ops() and appldata_unregister_ops() for the
  * Exports appldata_register_ops() and appldata_unregister_ops() for the

+ 1 - 0
arch/s390/appldata/appldata_mem.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Collects data related to memory management.
  * Collects data related to memory management.

+ 1 - 0
arch/s390/appldata/appldata_net_sum.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Collects accumulated network statistics (Packets received/transmitted,
  * Collects accumulated network statistics (Packets received/transmitted,

+ 1 - 0
arch/s390/appldata/appldata_os.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Collects misc. OS related data (CPU utilization, running processes).
  * Collects misc. OS related data (CPU utilization, running processes).

+ 1 - 4
arch/s390/boot/install.sh

@@ -1,11 +1,8 @@
 #!/bin/sh
 #!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
 #
 #
 # arch/s390x/boot/install.sh
 # arch/s390x/boot/install.sh
 #
 #
-# This file is subject to the terms and conditions of the GNU General Public
-# License.  See the file "COPYING" in the main directory of this archive
-# for more details.
-#
 # Copyright (C) 1995 by Linus Torvalds
 # Copyright (C) 1995 by Linus Torvalds
 #
 #
 # Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
 # Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin

+ 1 - 6
arch/s390/crypto/aes_s390.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
 /*
  * Cryptographic API.
  * Cryptographic API.
  *
  *
@@ -11,12 +12,6 @@
  *		Harald Freudenberger <freude@de.ibm.com>
  *		Harald Freudenberger <freude@de.ibm.com>
  *
  *
  * Derived from "crypto/aes_generic.c"
  * Derived from "crypto/aes_generic.c"
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
  */
 
 
 #define KMSG_COMPONENT "aes_s390"
 #define KMSG_COMPONENT "aes_s390"

+ 1 - 5
arch/s390/crypto/arch_random.c

@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * s390 arch random implementation.
  * s390 arch random implementation.
  *
  *
  * Copyright IBM Corp. 2017
  * Copyright IBM Corp. 2017
  * Author(s): Harald Freudenberger <freude@de.ibm.com>
  * Author(s): Harald Freudenberger <freude@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  */
  */
 
 
 #include <linux/kernel.h>
 #include <linux/kernel.h>

+ 1 - 0
arch/s390/crypto/crc32-vx.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * Crypto-API module for CRC-32 algorithms implemented with the
  * Crypto-API module for CRC-32 algorithms implemented with the
  * z/Architecture Vector Extension Facility.
  * z/Architecture Vector Extension Facility.

+ 1 - 6
arch/s390/crypto/des_s390.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
 /*
  * Cryptographic API.
  * Cryptographic API.
  *
  *
@@ -6,12 +7,6 @@
  * Copyright IBM Corp. 2003, 2011
  * Copyright IBM Corp. 2003, 2011
  * Author(s): Thomas Spatzier
  * Author(s): Thomas Spatzier
  *	      Jan Glauber (jan.glauber@de.ibm.com)
  *	      Jan Glauber (jan.glauber@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
  */
  */
 
 
 #include <linux/init.h>
 #include <linux/init.h>

+ 1 - 0
arch/s390/crypto/ghash_s390.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * Cryptographic API.
  * Cryptographic API.
  *
  *

+ 1 - 5
arch/s390/crypto/paes_s390.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * Cryptographic API.
  * Cryptographic API.
  *
  *
@@ -7,11 +8,6 @@
  *   Copyright IBM Corp. 2017
  *   Copyright IBM Corp. 2017
  *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  *		Harald Freudenberger <freude@de.ibm.com>
  *		Harald Freudenberger <freude@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  */
  */
 
 
 #define KMSG_COMPONENT "paes_s390"
 #define KMSG_COMPONENT "paes_s390"

+ 1 - 0
arch/s390/crypto/prng.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * Copyright IBM Corp. 2006, 2015
  * Copyright IBM Corp. 2006, 2015
  * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
  * Author(s): Jan Glauber <jan.glauber@de.ibm.com>

+ 1 - 6
arch/s390/crypto/sha.h

@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
 /*
  * Cryptographic API.
  * Cryptographic API.
  *
  *
@@ -5,12 +6,6 @@
  *
  *
  * Copyright IBM Corp. 2007
  * Copyright IBM Corp. 2007
  * Author(s): Jan Glauber (jang@de.ibm.com)
  * Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
  */
 #ifndef _CRYPTO_ARCH_S390_SHA_H
 #ifndef _CRYPTO_ARCH_S390_SHA_H
 #define _CRYPTO_ARCH_S390_SHA_H
 #define _CRYPTO_ARCH_S390_SHA_H

+ 1 - 6
arch/s390/crypto/sha256_s390.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
 /*
  * Cryptographic API.
  * Cryptographic API.
  *
  *
@@ -6,12 +7,6 @@
  * s390 Version:
  * s390 Version:
  *   Copyright IBM Corp. 2005, 2011
  *   Copyright IBM Corp. 2005, 2011
  *   Author(s): Jan Glauber (jang@de.ibm.com)
  *   Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
  */
 #include <crypto/internal/hash.h>
 #include <crypto/internal/hash.h>
 #include <linux/init.h>
 #include <linux/init.h>

+ 1 - 6
arch/s390/crypto/sha512_s390.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
 /*
  * Cryptographic API.
  * Cryptographic API.
  *
  *
@@ -5,12 +6,6 @@
  *
  *
  * Copyright IBM Corp. 2007
  * Copyright IBM Corp. 2007
  * Author(s): Jan Glauber (jang@de.ibm.com)
  * Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
  */
 #include <crypto/internal/hash.h>
 #include <crypto/internal/hash.h>
 #include <crypto/sha.h>
 #include <crypto/sha.h>

+ 1 - 6
arch/s390/crypto/sha_common.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
 /*
  * Cryptographic API.
  * Cryptographic API.
  *
  *
@@ -5,12 +6,6 @@
  *
  *
  * Copyright IBM Corp. 2007
  * Copyright IBM Corp. 2007
  * Author(s): Jan Glauber (jang@de.ibm.com)
  * Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
  */
 
 
 #include <crypto/internal/hash.h>
 #include <crypto/internal/hash.h>

+ 1 - 1
arch/s390/hypfs/inode.c

@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
 /*
  *    Hypervisor filesystem for Linux on s390.
  *    Hypervisor filesystem for Linux on s390.
  *
  *
  *    Copyright IBM Corp. 2006, 2008
  *    Copyright IBM Corp. 2006, 2008
  *    Author(s): Michael Holzheu <holzheu@de.ibm.com>
  *    Author(s): Michael Holzheu <holzheu@de.ibm.com>
- *    License: GPL
  */
  */
 
 
 #define KMSG_COMPONENT "hypfs"
 #define KMSG_COMPONENT "hypfs"

+ 1 - 4
arch/s390/include/asm/cpu_mf.h

@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
 /*
  * CPU-measurement facilities
  * CPU-measurement facilities
  *
  *
  *  Copyright IBM Corp. 2012
  *  Copyright IBM Corp. 2012
  *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  *	       Jan Glauber <jang@linux.vnet.ibm.com>
  *	       Jan Glauber <jang@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
  */
 #ifndef _ASM_S390_CPU_MF_H
 #ifndef _ASM_S390_CPU_MF_H
 #define _ASM_S390_CPU_MF_H
 #define _ASM_S390_CPU_MF_H

+ 8 - 7
arch/s390/include/asm/elf.h

@@ -194,13 +194,14 @@ struct arch_elf_state {
 #define CORE_DUMP_USE_REGSET
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	PAGE_SIZE
 #define ELF_EXEC_PAGESIZE	PAGE_SIZE
 
 
-/*
- * This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
- * space open for things that want to use the area for 32-bit pointers.
- */
-#define ELF_ET_DYN_BASE		(is_compat_task() ? 0x000400000UL : \
-						    0x100000000UL)
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk. 64-bit
+   tasks are aligned to 4GB. */
+#define ELF_ET_DYN_BASE (is_compat_task() ? \
+				(STACK_TOP / 3 * 2) : \
+				(STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
 
 
 /* This yields a mask that user programs can use to figure out what
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
    instruction set this CPU supports. */

+ 1 - 14
arch/s390/include/asm/kprobes.h

@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 #ifndef _ASM_S390_KPROBES_H
 #ifndef _ASM_S390_KPROBES_H
 #define _ASM_S390_KPROBES_H
 #define _ASM_S390_KPROBES_H
 /*
 /*
  *  Kernel Probes (KProbes)
  *  Kernel Probes (KProbes)
  *
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  * Copyright IBM Corp. 2002, 2006
  * Copyright IBM Corp. 2002, 2006
  *
  *
  * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
  * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel

+ 1 - 4
arch/s390/include/asm/kvm_host.h

@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
 /*
  * definition for kernel virtual machines on s390
  * definition for kernel virtual machines on s390
  *
  *
  * Copyright IBM Corp. 2008, 2009
  * Copyright IBM Corp. 2008, 2009
  *
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  */
  */
 
 

+ 1 - 6
arch/s390/include/asm/kvm_para.h

@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
 /*
  * definition for paravirtual devices on s390
  * definition for paravirtual devices on s390
  *
  *
  * Copyright IBM Corp. 2008
  * Copyright IBM Corp. 2008
  *
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  */
  */
 /*
 /*
@@ -20,8 +17,6 @@
  *
  *
  * Copyright IBM Corp. 2007,2008
  * Copyright IBM Corp. 2007,2008
  * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
  */
  */
 #ifndef __S390_KVM_PARA_H
 #ifndef __S390_KVM_PARA_H
 #define __S390_KVM_PARA_H
 #define __S390_KVM_PARA_H

+ 1 - 7
arch/s390/include/asm/livepatch.h

@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
 /*
  * livepatch.h - s390-specific Kernel Live Patching Core
  * livepatch.h - s390-specific Kernel Live Patching Core
  *
  *
@@ -7,13 +8,6 @@
  *	      Jiri Slaby
  *	      Jiri Slaby
  */
  */
 
 
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
 #ifndef ASM_LIVEPATCH_H
 #ifndef ASM_LIVEPATCH_H
 #define ASM_LIVEPATCH_H
 #define ASM_LIVEPATCH_H
 
 

+ 1 - 1
arch/s390/include/asm/mmu_context.h

@@ -28,7 +28,7 @@ static inline int init_new_context(struct task_struct *tsk,
 #ifdef CONFIG_PGSTE
 #ifdef CONFIG_PGSTE
 	mm->context.alloc_pgste = page_table_allocate_pgste ||
 	mm->context.alloc_pgste = page_table_allocate_pgste ||
 		test_thread_flag(TIF_PGSTE) ||
 		test_thread_flag(TIF_PGSTE) ||
-		current->mm->context.alloc_pgste;
+		(current->mm && current->mm->context.alloc_pgste);
 	mm->context.has_pgste = 0;
 	mm->context.has_pgste = 0;
 	mm->context.use_skey = 0;
 	mm->context.use_skey = 0;
 	mm->context.use_cmma = 0;
 	mm->context.use_cmma = 0;

+ 7 - 1
arch/s390/include/asm/pgtable.h

@@ -709,7 +709,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
 	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
 	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
 }
 }
 
 
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline int pmd_write(pmd_t pmd)
 static inline int pmd_write(pmd_t pmd)
 {
 {
 	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
 	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
@@ -1264,6 +1264,12 @@ static inline pud_t pud_mkwrite(pud_t pud)
 	return pud;
 	return pud;
 }
 }
 
 
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
+}
+
 static inline pud_t pud_mkclean(pud_t pud)
 static inline pud_t pud_mkclean(pud_t pud)
 {
 {
 	if (pud_large(pud)) {
 	if (pud_large(pud)) {

+ 1 - 4
arch/s390/include/asm/syscall.h

@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
 /*
  * Access to user system call parameters and results
  * Access to user system call parameters and results
  *
  *
  *  Copyright IBM Corp. 2008
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
  */
 
 
 #ifndef _ASM_SYSCALL_H
 #ifndef _ASM_SYSCALL_H

+ 1 - 4
arch/s390/include/asm/sysinfo.h

@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
 /*
  * definition for store system information stsi
  * definition for store system information stsi
  *
  *
  * Copyright IBM Corp. 2001, 2008
  * Copyright IBM Corp. 2001, 2008
  *
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Ulrich Weigand <weigand@de.ibm.com>
  *    Author(s): Ulrich Weigand <weigand@de.ibm.com>
  *		 Christian Borntraeger <borntraeger@de.ibm.com>
  *		 Christian Borntraeger <borntraeger@de.ibm.com>
  */
  */

+ 1 - 0
arch/s390/include/asm/topology.h

@@ -53,6 +53,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
 static inline void topology_init_early(void) { }
 static inline void topology_init_early(void) { }
 static inline void topology_schedule_update(void) { }
 static inline void topology_schedule_update(void) { }
 static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
 static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
+static inline int topology_cpu_dedicated(int cpu_nr) { return 0; }
 static inline void topology_expect_change(void) { }
 static inline void topology_expect_change(void) { }
 
 
 #endif /* CONFIG_SCHED_TOPOLOGY */
 #endif /* CONFIG_SCHED_TOPOLOGY */

+ 0 - 4
arch/s390/include/uapi/asm/kvm.h

@@ -6,10 +6,6 @@
  *
  *
  * Copyright IBM Corp. 2008
  * Copyright IBM Corp. 2008
  *
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  */
  */

+ 0 - 4
arch/s390/include/uapi/asm/kvm_para.h

@@ -4,9 +4,5 @@
  *
  *
  * Copyright IBM Corp. 2008
  * Copyright IBM Corp. 2008
  *
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  */
  */

+ 0 - 4
arch/s390/include/uapi/asm/kvm_perf.h

@@ -4,10 +4,6 @@
  *
  *
  * Copyright 2014 IBM Corp.
  * Copyright 2014 IBM Corp.
  * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
  * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
  */
 
 
 #ifndef __LINUX_KVM_PERF_S390_H
 #ifndef __LINUX_KVM_PERF_S390_H

+ 0 - 4
arch/s390/include/uapi/asm/virtio-ccw.h

@@ -4,10 +4,6 @@
  *
  *
  * Copyright IBM Corp. 2013
  * Copyright IBM Corp. 2013
  *
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *  Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  *  Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  */
  */
 #ifndef __KVM_VIRTIO_CCW_H
 #ifndef __KVM_VIRTIO_CCW_H

+ 0 - 14
arch/s390/include/uapi/asm/zcrypt.h

@@ -9,20 +9,6 @@
  *	       Eric Rossman (edrossma@us.ibm.com)
  *	       Eric Rossman (edrossma@us.ibm.com)
  *
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
  */
 
 
 #ifndef __ASM_S390_ZCRYPT_H
 #ifndef __ASM_S390_ZCRYPT_H

+ 1 - 1
arch/s390/kernel/debug.c

@@ -1392,7 +1392,7 @@ int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
 	else
 	else
 		except_str = "-";
 		except_str = "-";
 	caller = (unsigned long) entry->caller;
 	caller = (unsigned long) entry->caller;
-	rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %p  ",
+	rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %pK  ",
 		      area, sec, usec, level, except_str,
 		      area, sec, usec, level, except_str,
 		      entry->id.fields.cpuid, (void *)caller);
 		      entry->id.fields.cpuid, (void *)caller);
 	return rc;
 	return rc;

+ 8 - 2
arch/s390/kernel/dis.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * Disassemble s390 instructions.
  * Disassemble s390 instructions.
  *
  *
@@ -396,9 +397,14 @@ struct s390_insn *find_insn(unsigned char *code)
 	unsigned char opfrag;
 	unsigned char opfrag;
 	int i;
 	int i;
 
 
+	/* Search the opcode offset table to find an entry which
+	 * matches the beginning of the opcode. If there is no match
+	 * the last entry will be used, which is the default entry for
+	 * unknown instructions as well as 1-byte opcode instructions.
+	 */
 	for (i = 0; i < ARRAY_SIZE(opcode_offset); i++) {
 	for (i = 0; i < ARRAY_SIZE(opcode_offset); i++) {
 		entry = &opcode_offset[i];
 		entry = &opcode_offset[i];
-		if (entry->opcode == code[0] || entry->opcode == 0)
+		if (entry->opcode == code[0])
 			break;
 			break;
 	}
 	}
 
 
@@ -543,7 +549,7 @@ void show_code(struct pt_regs *regs)
 		start += opsize;
 		start += opsize;
 		pr_cont("%s", buffer);
 		pr_cont("%s", buffer);
 		ptr = buffer;
 		ptr = buffer;
-		ptr += sprintf(ptr, "\n\t  ");
+		ptr += sprintf(ptr, "\n          ");
 		hops++;
 		hops++;
 	}
 	}
 	pr_cont("\n");
 	pr_cont("\n");

+ 1 - 0
arch/s390/kernel/dumpstack.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * Stack dumping functions
  * Stack dumping functions
  *
  *

Some files were not shown because too many files changed in this diff