
Merge tag 'powerpc-4.12-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "Mostly fairly minor, of note are:

   - Fix percpu allocations to be NUMA aware

   - Limit 4k page size config to 64TB virtual address space

   - Avoid needlessly restoring FP and vector registers

  Thanks to Aneesh Kumar K.V, Breno Leitao, Christophe Leroy, Frederic
  Barrat, Madhavan Srinivasan, Michael Bringmann, Nicholas Piggin,
  Vaibhav Jain"

* tag 'powerpc-4.12-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/book3s64: Move PPC_DT_CPU_FTRs and enable it by default
  powerpc/mm/4k: Limit 4k page size config to 64TB virtual address space
  cxl: Fix error path on bad ioctl
  powerpc/perf: Fix Power9 test_adder fields
  powerpc/numa: Fix percpu allocations to be NUMA aware
  cxl: Avoid double free_irq() for psl,slice interrupts
  powerpc/kernel: Initialize load_tm on task creation
  powerpc/kernel: Fix FP and vector register restoration
  powerpc/64: Reclaim CPU_FTR_SUBCORE
  powerpc/hotplug-mem: Fix missing endian conversion of aa_index
  powerpc/sysdev/simple_gpio: Fix oops in gpio save_regs function
  powerpc/spufs: Fix coredump of SPU contexts
  powerpc/64s: Add dt_cpu_ftrs boot time setup option
Linus Torvalds, 8 years ago
Commit a92f63cd13

+ 9 - 0
Documentation/admin-guide/kernel-parameters.txt

@@ -866,6 +866,15 @@

 	dscc4.setup=	[NET]

+	dt_cpu_ftrs=	[PPC]
+			Format: {"off" | "known"}
+			Control how the dt_cpu_ftrs device-tree binding is
+			used for CPU feature discovery and setup (if it
+			exists).
+			off: Do not use it, fall back to legacy cpu table.
+			known: Do not pass through unknown features to guests
+			or userspace, only those that the kernel is aware of.
+
 	dump_apple_properties	[X86]
 			Dump name and content of EFI device properties on
 			x86 Macs.  Useful for driver authors to determine
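
For reference (not part of the patch): the new option is passed on the kernel command line like any other boot parameter, appended by the bootloader configuration. An illustrative sketch of the two accepted values:

	dt_cpu_ftrs=off     ignore the binding, use the legacy cpu table
	dt_cpu_ftrs=known   use the binding, but expose only kernel-known features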

+ 0 - 16
arch/powerpc/Kconfig

@@ -380,22 +380,6 @@ source "arch/powerpc/platforms/Kconfig"

 menu "Kernel options"

-config PPC_DT_CPU_FTRS
-	bool "Device-tree based CPU feature discovery & setup"
-	depends on PPC_BOOK3S_64
-	default n
-	help
-	  This enables code to use a new device tree binding for describing CPU
-	  compatibility and features. Saying Y here will attempt to use the new
-	  binding if the firmware provides it. Currently only the skiboot
-	  firmware provides this binding.
-	  If you're not sure say Y.
-
-config PPC_CPUFEATURES_ENABLE_UNKNOWN
-	bool "cpufeatures pass through unknown features to guest/userspace"
-	depends on PPC_DT_CPU_FTRS
-	default y
-
 config HIGHMEM
 	bool "High memory support"
 	depends on PPC32

+ 1 - 1
arch/powerpc/include/asm/book3s/64/hash-4k.h

@@ -8,7 +8,7 @@
 #define H_PTE_INDEX_SIZE  9
 #define H_PMD_INDEX_SIZE  7
 #define H_PUD_INDEX_SIZE  9
-#define H_PGD_INDEX_SIZE  12
+#define H_PGD_INDEX_SIZE  9

 #ifndef __ASSEMBLY__
 #define H_PTE_TABLE_SIZE	(sizeof(pte_t) << H_PTE_INDEX_SIZE)
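
The page-table geometry above is where the 64TB limit for the 4K-page configuration comes from: 12 bits of page offset plus the four index sizes give a 46-bit user virtual address space. A quick standalone check of that arithmetic (a plain C sketch, not kernel code):

#include <stdio.h>

int main(void)
{
	/* 4K pages: 12-bit page offset, then PTE/PMD/PUD/PGD index sizes. */
	unsigned int shift = 12 + 9 + 7 + 9 + 9;	/* new H_PGD_INDEX_SIZE = 9 */

	/* 46 bits -> 64 TB; the old PGD index of 12 gave 49 bits -> 512 TB. */
	printf("%u address bits = %llu TB\n", shift, (1ULL << shift) >> 40);
	return 0;
}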

+ 1 - 2
arch/powerpc/include/asm/cputable.h

@@ -214,7 +214,6 @@ enum {
 #define CPU_FTR_DAWR			LONG_ASM_CONST(0x0400000000000000)
 #define CPU_FTR_DABRX			LONG_ASM_CONST(0x0800000000000000)
 #define CPU_FTR_PMAO_BUG		LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_SUBCORE			LONG_ASM_CONST(0x2000000000000000)
 #define CPU_FTR_POWER9_DD1		LONG_ASM_CONST(0x4000000000000000)

 #ifndef __ASSEMBLY__
@@ -463,7 +462,7 @@ enum {
 	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
 	    CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
 	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
-	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE)
+	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
 #define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \

+ 12 - 13
arch/powerpc/include/asm/processor.h

@@ -110,13 +110,18 @@ void release_thread(struct task_struct *);
 #define TASK_SIZE_128TB (0x0000800000000000UL)
 #define TASK_SIZE_512TB (0x0002000000000000UL)

-#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * For now 512TB is only supported with book3s and 64K linux page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
 /*
  * Max value currently used:
  */
-#define TASK_SIZE_USER64	TASK_SIZE_512TB
+#define TASK_SIZE_USER64		TASK_SIZE_512TB
+#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_128TB
 #else
-#define TASK_SIZE_USER64	TASK_SIZE_64TB
+#define TASK_SIZE_USER64		TASK_SIZE_64TB
+#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_64TB
 #endif

 /*
@@ -132,7 +137,7 @@ void release_thread(struct task_struct *);
  * space during mmap's.
  */
 #define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))

 #define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
 		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
@@ -143,21 +148,15 @@ void release_thread(struct task_struct *);
  * with 128TB and conditionally enable upto 512TB
  */
 #ifdef CONFIG_PPC_BOOK3S_64
-#define DEFAULT_MAP_WINDOW	((is_32bit_task()) ? \
-				 TASK_SIZE_USER32 : TASK_SIZE_128TB)
+#define DEFAULT_MAP_WINDOW	((is_32bit_task()) ?			\
+				 TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
 #else
 #define DEFAULT_MAP_WINDOW	TASK_SIZE
 #endif

 #ifdef __powerpc64__

-#ifdef CONFIG_PPC_BOOK3S_64
-/* Limit stack to 128TB */
-#define STACK_TOP_USER64 TASK_SIZE_128TB
-#else
-#define STACK_TOP_USER64 TASK_SIZE_USER64
-#endif
-
+#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
 #define STACK_TOP_USER32 TASK_SIZE_USER32

 #define STACK_TOP (is_32bit_task() ? \
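
Net effect of the processor.h changes, summarized with an informal sketch (plain C mirroring the #if logic, not the real macros): a 64K-page Book3S kernel keeps TASK_SIZE_USER64 at 512TB with a 128TB default map window, while a 4K-page kernel caps both at 64TB, and STACK_TOP_USER64 now follows the default map window.

#include <stdio.h>

#define TB (1ULL << 40)

int main(void)
{
	/* Illustrative only: mirrors the CONFIG_PPC_64K_PAGES split above. */
	for (int pages_64k = 1; pages_64k >= 0; pages_64k--) {
		unsigned long long task_size  = pages_64k ? 512 * TB : 64 * TB;
		unsigned long long map_window = pages_64k ? 128 * TB : 64 * TB;

		printf("%sK pages: TASK_SIZE_USER64=%lluTB DEFAULT_MAP_WINDOW_USER64=%lluTB STACK_TOP_USER64=%lluTB\n",
		       pages_64k ? "64" : "4", task_size / TB,
		       map_window / TB, map_window / TB);
	}
	return 0;
}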

+ 14 - 0
arch/powerpc/include/asm/topology.h

@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
 extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);

+static inline int early_cpu_to_node(int cpu)
+{
+	int nid;
+
+	nid = numa_cpu_lookup_table[cpu];
+
+	/*
+	 * Fall back to node 0 if nid is unset (it should be, except bugs).
+	 * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+	 */
+	return (nid < 0) ? 0 : nid;
+}
 #else

+static inline int early_cpu_to_node(int cpu) { return 0; }
+
 static inline void dump_numa_cpu_topology(void) {}

 static inline int sysfs_add_device_to_node(struct device *dev, int nid)

+ 48 - 10
arch/powerpc/kernel/dt_cpu_ftrs.c

@@ -8,6 +8,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/jump_label.h>
+#include <linux/libfdt.h>
 #include <linux/memblock.h>
 #include <linux/printk.h>
 #include <linux/sched.h>
@@ -642,7 +643,6 @@ static struct dt_cpu_feature_match __initdata
 	{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
 	{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
 	{"processor-utilization-of-resources-register", feat_enable_purr, 0},
-	{"subcore", feat_enable, CPU_FTR_SUBCORE},
 	{"no-execute", feat_enable, 0},
 	{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
 	{"cache-inhibited-large-page", feat_enable_large_ci, 0},
@@ -671,12 +671,24 @@ static struct dt_cpu_feature_match __initdata
 	{"wait-v3", feat_enable, 0},
 };

-/* XXX: how to configure this? Default + boot time? */
-#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN
-#define CPU_FEATURE_ENABLE_UNKNOWN 1
-#else
-#define CPU_FEATURE_ENABLE_UNKNOWN 0
-#endif
+static bool __initdata using_dt_cpu_ftrs;
+static bool __initdata enable_unknown = true;
+
+static int __init dt_cpu_ftrs_parse(char *str)
+{
+	if (!str)
+		return 0;
+
+	if (!strcmp(str, "off"))
+		using_dt_cpu_ftrs = false;
+	else if (!strcmp(str, "known"))
+		enable_unknown = false;
+	else
+		return 1;
+
+	return 0;
+}
+early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);

 static void __init cpufeatures_setup_start(u32 isa)
 {
@@ -707,7 +719,7 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
 		}
 	}

-	if (!known && CPU_FEATURE_ENABLE_UNKNOWN) {
+	if (!known && enable_unknown) {
 		if (!feat_try_enable_unknown(f)) {
 			pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
 				f->name);
@@ -756,6 +768,26 @@ static void __init cpufeatures_setup_finished(void)
 		cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
 }

+static int __init disabled_on_cmdline(void)
+{
+	unsigned long root, chosen;
+	const char *p;
+
+	root = of_get_flat_dt_root();
+	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
+	if (chosen == -FDT_ERR_NOTFOUND)
+		return false;
+
+	p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
+	if (!p)
+		return false;
+
+	if (strstr(p, "dt_cpu_ftrs=off"))
+		return true;
+
+	return false;
+}
+
 static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
 					int depth, void *data)
 {
@@ -766,8 +798,6 @@ static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
 	return 0;
 }

-static bool __initdata using_dt_cpu_ftrs = false;
-
 bool __init dt_cpu_ftrs_in_use(void)
 {
 	return using_dt_cpu_ftrs;
@@ -775,6 +805,8 @@ bool __init dt_cpu_ftrs_in_use(void)

 bool __init dt_cpu_ftrs_init(void *fdt)
 {
+	using_dt_cpu_ftrs = false;
+
 	/* Setup and verify the FDT, if it fails we just bail */
 	if (!early_init_dt_verify(fdt))
 		return false;
@@ -782,6 +814,9 @@ bool __init dt_cpu_ftrs_init(void *fdt)
 	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
 		return false;

+	if (disabled_on_cmdline())
+		return false;
+
 	cpufeatures_setup_cpu();

 	using_dt_cpu_ftrs = true;
@@ -1027,5 +1062,8 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char

 void __init dt_cpu_ftrs_scan(void)
 {
+	if (!using_dt_cpu_ftrs)
+		return;
+
 	of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
 }
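
A note on the boot-time option: the command line is checked twice, with disabled_on_cmdline() scanning /chosen/bootargs directly via strstr(), presumably because dt_cpu_ftrs_init() runs before the normal command-line parsing, while the early_param() handler later covers both values. The semantics of the two values, restated as a small userspace sketch (not the kernel code; in the kernel, using_dt_cpu_ftrs only becomes true once the binding is actually found):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Userspace restatement of the dt_cpu_ftrs= value handling. */
static bool using_dt_cpu_ftrs = true;	/* simplified: assume the binding exists */
static bool enable_unknown = true;

static int dt_cpu_ftrs_parse(const char *str)
{
	if (!str)
		return 0;
	if (!strcmp(str, "off"))
		using_dt_cpu_ftrs = false;	/* fall back to the legacy cpu table */
	else if (!strcmp(str, "known"))
		enable_unknown = false;		/* expose only kernel-known features */
	else
		return 1;			/* unrecognized value */
	return 0;
}

int main(void)
{
	dt_cpu_ftrs_parse("known");
	printf("using_dt_cpu_ftrs=%d enable_unknown=%d\n",
	       using_dt_cpu_ftrs, enable_unknown);
	return 0;
}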

+ 3 - 0
arch/powerpc/kernel/process.c

@@ -1666,6 +1666,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #ifdef CONFIG_VSX
 	current->thread.used_vsr = 0;
 #endif
+	current->thread.load_fp = 0;
 	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
 	current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
@@ -1674,6 +1675,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.vr_save_area = NULL;
 	current->thread.vrsave = 0;
 	current->thread.used_vr = 0;
+	current->thread.load_vec = 0;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
 	memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1685,6 +1687,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.tm_tfhar = 0;
 	current->thread.tm_texasr = 0;
 	current->thread.tm_tfiar = 0;
+	current->thread.load_tm = 0;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 }
 EXPORT_SYMBOL(start_thread);

+ 1 - 1
arch/powerpc/kernel/setup-common.c

@@ -928,7 +928,7 @@ void __init setup_arch(char **cmdline_p)

 #ifdef CONFIG_PPC_MM_SLICES
 #ifdef CONFIG_PPC64
-	init_mm.context.addr_limit = TASK_SIZE_128TB;
+	init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 #else
 #error	"context.addr_limit not initialized."
 #endif

+ 2 - 2
arch/powerpc/kernel/setup_64.c

@@ -661,7 +661,7 @@ void __init emergency_stack_init(void)

 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
-	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+	return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
 				    __pa(MAX_DMA_ADDRESS));
 }

@@ -672,7 +672,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)

 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-	if (cpu_to_node(from) == cpu_to_node(to))
+	if (early_cpu_to_node(from) == early_cpu_to_node(to))
 		return LOCAL_DISTANCE;
 	else
 		return REMOTE_DISTANCE;

+ 1 - 1
arch/powerpc/mm/mmu_context_book3s64.c

@@ -99,7 +99,7 @@ static int hash__init_new_context(struct mm_struct *mm)
 	 * mm->context.addr_limit. Default to max task size so that we copy the
 	 * default values to paca which will help us to handle slb miss early.
 	 */
-	mm->context.addr_limit = TASK_SIZE_128TB;
+	mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;

 	/*
 	 * The old code would re-promote on fork, we don't do that when using

+ 2 - 2
arch/powerpc/perf/power9-pmu.c

@@ -402,7 +402,7 @@ static struct power_pmu power9_isa207_pmu = {
 	.name			= "POWER9",
 	.n_counter		= MAX_PMU_COUNTERS,
 	.add_fields		= ISA207_ADD_FIELDS,
-	.test_adder		= ISA207_TEST_ADDER,
+	.test_adder		= P9_DD1_TEST_ADDER,
 	.compute_mmcr		= isa207_compute_mmcr,
 	.config_bhrb		= power9_config_bhrb,
 	.bhrb_filter_map	= power9_bhrb_filter_map,
@@ -421,7 +421,7 @@ static struct power_pmu power9_pmu = {
 	.name			= "POWER9",
 	.n_counter		= MAX_PMU_COUNTERS,
 	.add_fields		= ISA207_ADD_FIELDS,
-	.test_adder		= P9_DD1_TEST_ADDER,
+	.test_adder		= ISA207_TEST_ADDER,
 	.compute_mmcr		= isa207_compute_mmcr,
 	.config_bhrb		= power9_config_bhrb,
 	.bhrb_filter_map	= power9_bhrb_filter_map,

+ 11 - 0
arch/powerpc/platforms/Kconfig

@@ -59,6 +59,17 @@ config PPC_OF_BOOT_TRAMPOLINE

 	  In case of doubt, say Y

+config PPC_DT_CPU_FTRS
+	bool "Device-tree based CPU feature discovery & setup"
+	depends on PPC_BOOK3S_64
+	default y
+	help
+	  This enables code to use a new device tree binding for describing CPU
+	  compatibility and features. Saying Y here will attempt to use the new
+	  binding if the firmware provides it. Currently only the skiboot
+	  firmware provides this binding.
+	  If you're not sure say Y.
+
 config UDBG_RTAS_CONSOLE
 	bool "RTAS based debug console"
 	depends on PPC_RTAS

+ 2 - 0
arch/powerpc/platforms/cell/spufs/coredump.c

@@ -175,6 +175,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
 	skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
 	if (!dump_skip(cprm, skip))
 		goto Eio;
+
+	rc = 0;
 out:
 	free_page((unsigned long)buf);
 	return rc;

+ 7 - 1
arch/powerpc/platforms/powernv/subcore.c

@@ -407,7 +407,13 @@ static DEVICE_ATTR(subcores_per_core, 0644,

 static int subcore_init(void)
 {
-	if (!cpu_has_feature(CPU_FTR_SUBCORE))
+	unsigned pvr_ver;
+
+	pvr_ver = PVR_VER(mfspr(SPRN_PVR));
+
+	if (pvr_ver != PVR_POWER8 &&
+	    pvr_ver != PVR_POWER8E &&
+	    pvr_ver != PVR_POWER8NVL)
 		return 0;

 	/*

+ 2 - 0
arch/powerpc/platforms/pseries/hotplug-memory.c

@@ -124,6 +124,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
 		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+		lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
 		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
 	}

@@ -147,6 +148,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
 		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+		lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
 		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
 	}
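
The drconf memory property is big-endian in the device tree, so every field has to be byte-swapped on little-endian kernels; aa_index (the index into the associativity lookup arrays) was the one field being missed. A minimal userspace analogue of the round-trip, using glibc's endian.h in place of the kernel's be32_to_cpu()/cpu_to_be32() (a sketch, not the pseries code):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t aa_index_be = htobe32(0x12);	/* field as stored in the device tree */

	/* Convert before use: correct on both byte orders. */
	printf("converted: 0x%x\n", be32toh(aa_index_be));
	/* Using the raw field, as the old code did, misreads it on little-endian. */
	printf("raw:       0x%x\n", aa_index_be);
	return 0;
}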
 
 

+ 2 - 1
arch/powerpc/sysdev/simple_gpio.c

@@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)

 static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-	struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
+	struct u8_gpio_chip *u8_gc =
+		container_of(mm_gc, struct u8_gpio_chip, mm_gc);

 	u8_gc->data = in_8(mm_gc->regs);
 }
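
The oops came from using the gpio_chip's driver data before it exists: the save_regs hook is invoked during registration, before the chip's data pointer has been set, so gpiochip_get_data() here returned NULL. container_of() sidesteps that by deriving the wrapper structure from the embedded of_mm_gpio_chip itself. A standalone illustration of the container_of pattern (hypothetical types, not the driver's):

#include <stddef.h>
#include <stdio.h>

/* Same recipe as the kernel's container_of(), restated for userspace. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mm_chip { unsigned char regs; };				/* stand-in for of_mm_gpio_chip */
struct u8_chip { unsigned char data; struct mm_chip mm; };	/* wrapper struct */

int main(void)
{
	struct u8_chip chip = { .data = 0xab };
	struct mm_chip *mm = &chip.mm;

	/* Recover the wrapper from the embedded member, no registration needed. */
	struct u8_chip *u8 = container_of(mm, struct u8_chip, mm);
	printf("data = 0x%x\n", u8->data);
	return 0;
}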

+ 2 - 5
drivers/misc/cxl/file.c

@@ -159,11 +159,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,

 	/* Do this outside the status_mutex to avoid a circular dependency with
 	 * the locking in cxl_mmap_fault() */
-	if (copy_from_user(&work, uwork,
-			   sizeof(struct cxl_ioctl_start_work))) {
-		rc = -EFAULT;
-		goto out;
-	}
+	if (copy_from_user(&work, uwork, sizeof(work)))
+		return -EFAULT;

 	mutex_lock(&ctx->status_mutex);
 	if (ctx->status != OPENED) {

+ 11 - 3
drivers/misc/cxl/native.c

@@ -1302,13 +1302,16 @@ int cxl_native_register_psl_err_irq(struct cxl *adapter)

 void cxl_native_release_psl_err_irq(struct cxl *adapter)
 {
-	if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
+	if (adapter->native->err_virq == 0 ||
+	    adapter->native->err_virq !=
+	    irq_find_mapping(NULL, adapter->native->err_hwirq))
 		return;

 	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
 	cxl_unmap_irq(adapter->native->err_virq, adapter);
 	cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
 	kfree(adapter->irq_name);
+	adapter->native->err_virq = 0;
 }

 int cxl_native_register_serr_irq(struct cxl_afu *afu)
@@ -1346,13 +1349,15 @@ int cxl_native_register_serr_irq(struct cxl_afu *afu)

 void cxl_native_release_serr_irq(struct cxl_afu *afu)
 {
-	if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
+	if (afu->serr_virq == 0 ||
+	    afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
 		return;

 	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
 	cxl_unmap_irq(afu->serr_virq, afu);
 	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
 	kfree(afu->err_irq_name);
+	afu->serr_virq = 0;
 }

 int cxl_native_register_psl_irq(struct cxl_afu *afu)
@@ -1375,12 +1380,15 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu)

 void cxl_native_release_psl_irq(struct cxl_afu *afu)
 {
-	if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
+	if (afu->native->psl_virq == 0 ||
+	    afu->native->psl_virq !=
+	    irq_find_mapping(NULL, afu->native->psl_hwirq))
 		return;

 	cxl_unmap_irq(afu->native->psl_virq, afu);
 	cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
 	kfree(afu->psl_irq_name);
+	afu->native->psl_virq = 0;
 }

 static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
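
The pattern in all three release paths is the same: bail out early if the virq was never mapped (or was already torn down), and clear it after teardown so a second call becomes a no-op rather than a double free_irq(). A generic userspace sketch of that idempotent-release idea (hypothetical names, not the cxl API):

#include <stdio.h>
#include <stdlib.h>

struct res { void *buf; };	/* stand-in for an irq mapping */

static void release_res(struct res *r)
{
	if (!r->buf)		/* already released: second call is a no-op */
		return;
	free(r->buf);
	r->buf = NULL;		/* mirror of the "virq = 0" in the patch */
}

int main(void)
{
	struct res r = { .buf = malloc(16) };

	release_res(&r);
	release_res(&r);	/* safe: would have been a double free before */
	printf("ok\n");
	return 0;
}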