
Merge branches 'pm-cpufreq' and 'pm-cpuidle'

* pm-cpufreq:
  intel_pstate: Knights Landing support
  intel_pstate: remove MSR test
  cpufreq: fix qoriq uniprocessor build
  cpufreq: hisilicon: add acpu driver
  cpufreq: powernv: Report cpu frequency throttling
  cpufreq: qoriq: rename the driver
  cpufreq: qoriq: Make the driver usable on all QorIQ platforms

* pm-cpuidle:
  intel_idle: mark cpu id array as __initconst
  intel_idle: Add support for the Airmont Core in the Cherrytrail and Braswell SOCs
  intel_idle: Update support for Silvermont Core in Baytrail SOC
  ARM: cpuidle: Document the code
  ARM: cpuidle: Register per cpuidle device
  ARM: cpuidle: Enable the ARM64 driver for both ARM32/ARM64
  ARM64: cpuidle: Remove arm64 reference
  ARM64: cpuidle: Rename cpu_init_idle to a common function name
  ARM64: cpuidle: Replace cpu_suspend by the common ARM/ARM64 function
  ARM: cpuidle: Add a cpuidle ops structure to be used for DT
  ARM: cpuidle: Remove duplicate header inclusion
Rafael J. Wysocki, 10 years ago
Commit 419a48ce85

+ 23 - 0
arch/arm/include/asm/cpuidle.h

@@ -1,6 +1,8 @@
 #ifndef __ASM_ARM_CPUIDLE_H
 #define __ASM_ARM_CPUIDLE_H
 
+#include <asm/proc-fns.h>
+
 #ifdef CONFIG_CPU_IDLE
 extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index);
@@ -25,4 +27,25 @@ static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
  */
 #define ARM_CPUIDLE_WFI_STATE ARM_CPUIDLE_WFI_STATE_PWR(UINT_MAX)
 
+struct device_node;
+
+struct cpuidle_ops {
+	int (*suspend)(int cpu, unsigned long arg);
+	int (*init)(struct device_node *, int cpu);
+};
+
+struct of_cpuidle_method {
+	const char *method;
+	struct cpuidle_ops *ops;
+};
+
+#define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops)			\
+	static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
+	__used __section(__cpuidle_method_of_table)			\
+	= { .method = _method, .ops = _ops }
+
+extern int arm_cpuidle_suspend(int index);
+
+extern int arm_cpuidle_init(int cpu);
+
 #endif
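
For context, a platform back-end would plug into this table roughly as sketched below. Only struct cpuidle_ops and CPUIDLE_METHOD_OF_DECLARE() come from the hunk above; the "vendor,foo" method string and the callback bodies are purely illustrative.

#include <linux/init.h>
#include <linux/of.h>
#include <asm/cpuidle.h>

/* Hypothetical callbacks; only the signatures are dictated by struct cpuidle_ops. */
static int foo_cpuidle_suspend(int cpu, unsigned long arg)
{
	/* enter the low power state selected by 'arg' on this cpu */
	return 0;
}

static int __init foo_cpuidle_init(struct device_node *cpu_node, int cpu)
{
	/* parse extra DT properties, map the power controller, ... */
	return 0;
}

static struct cpuidle_ops foo_cpuidle_ops __initdata = {
	.suspend = foo_cpuidle_suspend,
	.init    = foo_cpuidle_init,
};

/* Matched at init time against the cpu node's "enable-method" property. */
CPUIDLE_METHOD_OF_DECLARE(foo, "vendor,foo", &foo_cpuidle_ops);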

+ 132 - 1
arch/arm/kernel/cpuidle.c

@@ -10,8 +10,28 @@
  */
 
 #include <linux/cpuidle.h>
-#include <asm/proc-fns.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <asm/cpuidle.h>
 
+extern struct of_cpuidle_method __cpuidle_method_of_table[];
+
+static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
+	__used __section(__cpuidle_method_of_table_end);
+
+static struct cpuidle_ops cpuidle_ops[NR_CPUS];
+
+/**
+ * arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()
+ * @dev: not used
+ * @drv: not used
+ * @index: not used
+ *
+ * A trivial wrapper to allow the cpu_do_idle function to be assigned as a
+ * cpuidle callback by matching the function signature.
+ *
+ * Returns the index passed as parameter
+ */
 int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
 {
@@ -19,3 +39,114 @@ int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
 
 	return index;
 }
+
+/**
+ * arm_cpuidle_suspend() - function to enter low power idle states
+ * @index: an integer used as an identifier for the low level PM callbacks
+ *
+ * This function calls the underlying arch specific low level PM code as
+ * registered at the init time.
+ *
+ * Returns -EOPNOTSUPP if no suspend callback is defined, the result of the
+ * callback otherwise.
+ */
+int arm_cpuidle_suspend(int index)
+{
+	int ret = -EOPNOTSUPP;
+	int cpu = smp_processor_id();
+
+	if (cpuidle_ops[cpu].suspend)
+		ret = cpuidle_ops[cpu].suspend(cpu, index);
+
+	return ret;
+}
+
+/**
+ * arm_cpuidle_get_ops() - find a registered cpuidle_ops by name
+ * @method: the method name
+ *
+ * Search in the __cpuidle_method_of_table array the cpuidle ops matching the
+ * method name.
+ *
+ * Returns a struct cpuidle_ops pointer, NULL if not found.
+ */
+static struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method)
+{
+	struct of_cpuidle_method *m = __cpuidle_method_of_table;
+
+	for (; m->method; m++)
+		if (!strcmp(m->method, method))
+			return m->ops;
+
+	return NULL;
+}
+
+/**
+ * arm_cpuidle_read_ops() - Initialize the cpuidle ops with the device tree
+ * @dn: a pointer to a struct device node corresponding to a cpu node
+ * @cpu: the cpu identifier
+ *
+ * Get the method name defined in the 'enable-method' property, retrieve the
+ * associated cpuidle_ops and do a struct copy. This copy is needed because all
+ * cpuidle_ops are tagged __initdata and will be unloaded after the init
+ * process.
+ *
+ * Return 0 on success, -ENOENT if no 'enable-method' is defined, -EOPNOTSUPP if
+ * no cpuidle_ops is registered for the 'enable-method'.
+ */
+static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
+{
+	const char *enable_method;
+	struct cpuidle_ops *ops;
+
+	enable_method = of_get_property(dn, "enable-method", NULL);
+	if (!enable_method)
+		return -ENOENT;
+
+	ops = arm_cpuidle_get_ops(enable_method);
+	if (!ops) {
+		pr_warn("%s: unsupported enable-method property: %s\n",
+			dn->full_name, enable_method);
+		return -EOPNOTSUPP;
+	}
+
+	cpuidle_ops[cpu] = *ops; /* structure copy */
+
+	pr_notice("cpuidle: enable-method property '%s'"
+		  " found operations\n", enable_method);
+
+	return 0;
+}
+
+/**
+ * arm_cpuidle_init() - Initialize cpuidle_ops for a specific cpu
+ * @cpu: the cpu to be initialized
+ *
+ * Initialize the cpuidle ops with the device for the cpu and then call
+ * the cpu's idle initialization callback. This may fail if the underlying HW
+ * is not operational.
+ *
+ * Returns:
+ *  0 on success,
+ *  -ENODEV if it fails to find the cpu node in the device tree,
+ *  -EOPNOTSUPP if it does not find a registered cpuidle_ops for this cpu,
+ *  -ENOENT if it fails to find an 'enable-method' property,
+ *  -ENXIO if the HW reports a failure or a misconfiguration,
+ *  -ENOMEM if the HW reports a memory allocation failure
+ */
+int __init arm_cpuidle_init(int cpu)
+{
+	struct device_node *cpu_node = of_cpu_device_node_get(cpu);
+	int ret;
+
+	if (!cpu_node)
+		return -ENODEV;
+
+	ret = arm_cpuidle_read_ops(cpu_node, cpu);
+	if (!ret && cpuidle_ops[cpu].init)
+		ret = cpuidle_ops[cpu].init(cpu_node, cpu);
+
+	of_node_put(cpu_node);
+
+	return ret;
+}
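
Condensed, the generic driver added later in this merge (drivers/cpuidle/cpuidle-arm.c) consumes these two entry points roughly as follows; this is a trimmed sketch with device registration and most error handling left out — see the full hunk further down.

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <asm/cpuidle.h>

static int sketch_enter(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int idx)
{
	/* dispatch to the enable-method's suspend callback for this cpu */
	return arm_cpuidle_suspend(idx) ? -1 : idx;
}

static int __init sketch_init(void)
{
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		ret = arm_cpuidle_init(cpu);	/* bind ops from the DT */
		if (ret == -ENXIO)
			continue;		/* unusable HW: skip this cpu */
		if (ret)
			return ret;
	}
	return 0;
}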

+ 0 - 1
arch/arm/mach-davinci/cpuidle.c

@@ -17,7 +17,6 @@
 #include <linux/cpuidle.h>
 #include <linux/io.h>
 #include <linux/export.h>
-#include <asm/proc-fns.h>
 #include <asm/cpuidle.h>
 
 #include <mach/cpuidle.h>

+ 0 - 1
arch/arm/mach-imx/cpuidle-imx6q.c

@@ -9,7 +9,6 @@
 #include <linux/cpuidle.h>
 #include <linux/module.h>
 #include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
 
 #include "common.h"
 #include "cpuidle.h"

+ 0 - 1
arch/arm/mach-imx/cpuidle-imx6sl.c

@@ -9,7 +9,6 @@
 #include <linux/cpuidle.h>
 #include <linux/module.h>
 #include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
 
 #include "common.h"
 #include "cpuidle.h"

+ 0 - 1
arch/arm/mach-imx/cpuidle-imx6sx.c

@@ -10,7 +10,6 @@
 #include <linux/cpu_pm.h>
 #include <linux/module.h>
 #include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
 #include <asm/suspend.h>
 
 #include "common.h"

+ 0 - 1
arch/arm/mach-omap2/cpuidle44xx.c

@@ -17,7 +17,6 @@
 #include <linux/clockchips.h>
 
 #include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
 
 #include "common.h"
 #include "pm.h"

+ 1 - 1
arch/arm/mach-s3c64xx/cpuidle.c

@@ -16,7 +16,7 @@
 #include <linux/export.h>
 #include <linux/time.h>
 
-#include <asm/proc-fns.h>
+#include <asm/cpuidle.h>
 
 #include <mach/map.h>
 

+ 0 - 1
arch/arm/mach-tegra/cpuidle-tegra20.c

@@ -27,7 +27,6 @@
 #include <linux/module.h>
 
 #include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
 

+ 0 - 1
arch/arm/mach-tegra/cpuidle-tegra30.c

@@ -27,7 +27,6 @@
 #include <linux/module.h>
 
 #include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
 

+ 1 - 1
arch/arm64/configs/defconfig

@@ -48,7 +48,7 @@ CONFIG_CMDLINE="console=ttyAMA0"
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
 CONFIG_CPU_IDLE=y
-CONFIG_ARM64_CPUIDLE=y
+CONFIG_ARM_CPUIDLE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y

+ 6 - 3
arch/arm64/include/asm/cpuidle.h

@@ -4,10 +4,10 @@
 #include <asm/proc-fns.h>
 
 #ifdef CONFIG_CPU_IDLE
-extern int cpu_init_idle(unsigned int cpu);
+extern int arm_cpuidle_init(unsigned int cpu);
 extern int cpu_suspend(unsigned long arg);
 #else
-static inline int cpu_init_idle(unsigned int cpu)
+static inline int arm_cpuidle_init(unsigned int cpu)
 {
 	return -EOPNOTSUPP;
 }
@@ -17,5 +17,8 @@ static inline int cpu_suspend(unsigned long arg)
 	return -EOPNOTSUPP;
 }
 #endif
-
+static inline int arm_cpuidle_suspend(int index)
+{
+	return cpu_suspend(index);
+}
 #endif

+ 1 - 1
arch/arm64/kernel/cpuidle.c

@@ -15,7 +15,7 @@
 #include <asm/cpuidle.h>
 #include <asm/cpu_ops.h>
 
-int cpu_init_idle(unsigned int cpu)
+int arm_cpuidle_init(unsigned int cpu)
 {
 	int ret = -EOPNOTSUPP;
 	struct device_node *cpu_node = of_cpu_device_node_get(cpu);

+ 8 - 0
drivers/cpufreq/Kconfig

@@ -293,5 +293,13 @@ config SH_CPU_FREQ
 	  If unsure, say N.
 endif
 
+config QORIQ_CPUFREQ
+	tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
+	depends on OF && COMMON_CLK && (PPC_E500MC || ARM)
+	select CLK_QORIQ
+	help
+	  This adds the CPUFreq driver support for Freescale QorIQ SoCs
+	  which are capable of changing the CPU's frequency dynamically.
+
 endif
 endmenu

+ 9 - 0
drivers/cpufreq/Kconfig.arm

@@ -108,6 +108,15 @@ config ARM_HIGHBANK_CPUFREQ
 
 	  If in doubt, say N.
 
+config ARM_HISI_ACPU_CPUFREQ
+	tristate "Hisilicon ACPU CPUfreq driver"
+	depends on ARCH_HISI && CPUFREQ_DT
+	select PM_OPP
+	help
+	  This enables the hisilicon ACPU CPUfreq driver.
+
+	  If in doubt, say N.
+
 config ARM_IMX6Q_CPUFREQ
 	tristate "Freescale i.MX6 cpufreq support"
 	depends on ARCH_MXC

+ 0 - 9
drivers/cpufreq/Kconfig.powerpc

@@ -23,15 +23,6 @@ config CPU_FREQ_MAPLE
 	  This adds support for frequency switching on Maple 970FX
 	  Evaluation Board and compatible boards (IBM JS2x blades).
 
-config PPC_CORENET_CPUFREQ
-	tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
-	depends on PPC_E500MC && OF && COMMON_CLK
-	select CLK_QORIQ
-	help
-	  This adds the CPUFreq driver support for Freescale e500mc,
-	  e5500 and e6500 series SoCs which are capable of changing
-	  the CPU's frequency dynamically.
-
 config CPU_FREQ_PMAC
 	bool "Support for Apple PowerBooks"
 	depends on ADB_PMU && PPC32

+ 2 - 1
drivers/cpufreq/Makefile

@@ -59,6 +59,7 @@ arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ)	+= exynos4x12-cpufreq.o
 arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS5250_CPUFREQ)	+= exynos5250-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ)	+= exynos5440-cpufreq.o
 obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ)	+= highbank-cpufreq.o
+obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ)	+= hisi-acpu-cpufreq.o
 obj-$(CONFIG_ARM_IMX6Q_CPUFREQ)		+= imx6q-cpufreq.o
 obj-$(CONFIG_ARM_INTEGRATOR)		+= integrator-cpufreq.o
 obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ)	+= kirkwood-cpufreq.o
@@ -85,7 +86,7 @@ obj-$(CONFIG_CPU_FREQ_CBE)		+= ppc-cbe-cpufreq.o
 ppc-cbe-cpufreq-y			+= ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o
 obj-$(CONFIG_CPU_FREQ_CBE_PMI)		+= ppc_cbe_cpufreq_pmi.o
 obj-$(CONFIG_CPU_FREQ_MAPLE)		+= maple-cpufreq.o
-obj-$(CONFIG_PPC_CORENET_CPUFREQ)   += ppc-corenet-cpufreq.o
+obj-$(CONFIG_QORIQ_CPUFREQ)   		+= qoriq-cpufreq.o
 obj-$(CONFIG_CPU_FREQ_PMAC)		+= pmac32-cpufreq.o
 obj-$(CONFIG_CPU_FREQ_PMAC64)		+= pmac64-cpufreq.o
 obj-$(CONFIG_PPC_PASEMI_CPUFREQ)	+= pasemi-cpufreq.o

+ 42 - 0
drivers/cpufreq/hisi-acpu-cpufreq.c

@@ -0,0 +1,42 @@
+/*
+ * Hisilicon Platforms Using ACPU CPUFreq Support
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 Linaro Limited.
+ *
+ * Leo Yan <leo.yan@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static int __init hisi_acpu_cpufreq_driver_init(void)
+{
+	struct platform_device *pdev;
+
+	if (!of_machine_is_compatible("hisilicon,hi6220"))
+		return -ENODEV;
+
+	pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+	return PTR_ERR_OR_ZERO(pdev);
+}
+module_init(hisi_acpu_cpufreq_driver_init);
+
+MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
+MODULE_DESCRIPTION("Hisilicon acpu cpufreq driver");
+MODULE_LICENSE("GPL v2");

+ 31 - 14
drivers/cpufreq/intel_pstate.c

@@ -614,6 +614,19 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
 	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
 }
 
+static int knl_get_turbo_pstate(void)
+{
+	u64 value;
+	int nont, ret;
+
+	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
+	nont = core_get_max_pstate();
+	ret = (((value) >> 8) & 0xFF);
+	if (ret <= nont)
+		ret = nont;
+	return ret;
+}
+
 static struct cpu_defaults core_params = {
 	.pid_policy = {
 		.sample_rate_ms = 10,
@@ -651,6 +664,23 @@ static struct cpu_defaults byt_params = {
 	},
 };
 
+static struct cpu_defaults knl_params = {
+	.pid_policy = {
+		.sample_rate_ms = 10,
+		.deadband = 0,
+		.setpoint = 97,
+		.p_gain_pct = 20,
+		.d_gain_pct = 0,
+		.i_gain_pct = 0,
+	},
+	.funcs = {
+		.get_max = core_get_max_pstate,
+		.get_min = core_get_min_pstate,
+		.get_turbo = knl_get_turbo_pstate,
+		.set = core_set_pstate,
+	},
+};
+
 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 {
 	int max_perf = cpu->pstate.turbo_pstate;
@@ -865,6 +895,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(0x4e, core_params),
 	ICPU(0x4f, core_params),
 	ICPU(0x56, core_params),
+	ICPU(0x57, knl_params),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -1024,25 +1055,11 @@ static unsigned int force_load;
 
 static int intel_pstate_msrs_not_valid(void)
 {
-	/* Check that all the msr's we are using are valid. */
-	u64 aperf, mperf, tmp;
-
-	rdmsrl(MSR_IA32_APERF, aperf);
-	rdmsrl(MSR_IA32_MPERF, mperf);
-
 	if (!pstate_funcs.get_max() ||
 	    !pstate_funcs.get_min() ||
 	    !pstate_funcs.get_turbo())
 		return -ENODEV;
 
-	rdmsrl(MSR_IA32_APERF, tmp);
-	if (!(tmp - aperf))
-		return -ENODEV;
-
-	rdmsrl(MSR_IA32_MPERF, tmp);
-	if (!(tmp - mperf))
-		return -ENODEV;
-
 	return 0;
 }
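
The new knl_get_turbo_pstate() simply takes bits 15:8 of MSR_NHM_TURBO_RATIO_LIMIT (0x1ad) as the turbo pstate and clamps it to the non-turbo maximum. For a quick sanity check from user space, the same field can be read through the msr driver — a minimal sketch (assumes CONFIG_X86_MSR and root; path and MSR index as used by msr-tools):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &val, sizeof(val), 0x1ad) != sizeof(val)) {
		perror("msr");
		return 1;
	}
	printf("turbo ratio field (bits 15:8): %llu\n",
	       (unsigned long long)((val >> 8) & 0xff));
	close(fd);
	return 0;
}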
 

+ 46 - 1
drivers/cpufreq/powernv-cpufreq.c

@@ -34,9 +34,13 @@
 #include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
 
 #define POWERNV_MAX_PSTATES	256
+#define PMSR_PSAFE_ENABLE	(1UL << 30)
+#define PMSR_SPR_EM_DISABLE	(1UL << 31)
+#define PMSR_MAX(x)		((x >> 32) & 0xFF)
+#define PMSR_LP(x)		((x >> 48) & 0xFF)
 
 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
-static bool rebooting;
+static bool rebooting, throttled;
 
 /*
  * Note: The set of pstates consists of contiguous integers, the
@@ -294,6 +298,44 @@ static inline unsigned int get_nominal_index(void)
 	return powernv_pstate_info.max - powernv_pstate_info.nominal;
 }
 
+static void powernv_cpufreq_throttle_check(unsigned int cpu)
+{
+	unsigned long pmsr;
+	int pmsr_pmax, pmsr_lp;
+
+	pmsr = get_pmspr(SPRN_PMSR);
+
+	/* Check for Pmax Capping */
+	pmsr_pmax = (s8)PMSR_MAX(pmsr);
+	if (pmsr_pmax != powernv_pstate_info.max) {
+		throttled = true;
+		pr_info("CPU %d Pmax is reduced to %d\n", cpu, pmsr_pmax);
+		pr_info("Max allowed Pstate is capped\n");
+	}
+
+	/*
+	 * Check for Psafe by reading LocalPstate
+	 * or check if Psafe_mode_active is set in PMSR.
+	 */
+	pmsr_lp = (s8)PMSR_LP(pmsr);
+	if ((pmsr_lp < powernv_pstate_info.min) ||
+				(pmsr & PMSR_PSAFE_ENABLE)) {
+		throttled = true;
+		pr_info("Pstate set to safe frequency\n");
+	}
+
+	/* Check if SPR_EM_DISABLE is set in PMSR */
+	if (pmsr & PMSR_SPR_EM_DISABLE) {
+		throttled = true;
+		pr_info("Frequency Control disabled from OS\n");
+	}
+
+	if (throttled) {
+		pr_info("PMSR = %16lx\n", pmsr);
+		pr_crit("CPU Frequency could be throttled\n");
+	}
+}
+
 /*
  * powernv_cpufreq_target_index: Sets the frequency corresponding to
  * the cpufreq table entry indexed by new_index on the cpus in the
@@ -307,6 +349,9 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
 	if (unlikely(rebooting) && new_index != get_nominal_index())
 		return 0;
 
+	if (!throttled)
+		powernv_cpufreq_throttle_check(smp_processor_id());
+
 	freq_data.pstate_id = powernv_freqs[new_index].driver_data;
 
 	/*
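
A note on the (s8) casts used with PMSR_MAX()/PMSR_LP() above: powernv pstate ids are signed 8-bit values, so the raw bit-fields have to be sign-extended before being compared against powernv_pstate_info.min/max. A standalone illustration with a made-up register value:

#include <stdint.h>
#include <stdio.h>

/* Bit-field helpers as in the hunk above. */
#define PMSR_MAX(x)	((x >> 32) & 0xFF)
#define PMSR_LP(x)	((x >> 48) & 0xFF)

int main(void)
{
	uint64_t pmsr = 0x00f400fb00000000ULL;	/* hypothetical PMSR sample */

	printf("Pmax=%d LocalPstate=%d\n",
	       (int8_t)PMSR_MAX(pmsr), (int8_t)PMSR_LP(pmsr));	/* -5 and -12 */
	return 0;
}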

+ 101 - 62
drivers/cpufreq/ppc-corenet-cpufreq.c → drivers/cpufreq/qoriq-cpufreq.c

@@ -1,7 +1,7 @@
 /*
  * Copyright 2013 Freescale Semiconductor, Inc.
  *
- * CPU Frequency Scaling driver for Freescale PowerPC corenet SoCs.
+ * CPU Frequency Scaling driver for Freescale QorIQ SoCs.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -20,12 +20,13 @@
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
-#include <sysdev/fsl_soc.h>
 
+#if !defined(CONFIG_ARM)
 #include <asm/smp.h>	/* for get_hard_smp_processor_id() in UP configs */
+#endif
 
 /**
- * struct cpu_data - per CPU data struct
+ * struct cpu_data
  * @parent: the parent node of cpu clock
  * @table: frequency table
  */
@@ -69,17 +70,78 @@ static const struct soc_data sdata[] = {
 static u32 min_cpufreq;
 static const u32 *fmask;
 
-static DEFINE_PER_CPU(struct cpu_data *, cpu_data);
+#if defined(CONFIG_ARM)
+static int get_cpu_physical_id(int cpu)
+{
+	return topology_core_id(cpu);
+}
+#else
+static int get_cpu_physical_id(int cpu)
+{
+	return get_hard_smp_processor_id(cpu);
+}
+#endif
 
-/* cpumask in a cluster */
-static DEFINE_PER_CPU(cpumask_var_t, cpu_mask);
+static u32 get_bus_freq(void)
+{
+	struct device_node *soc;
+	u32 sysfreq;
+
+	soc = of_find_node_by_type(NULL, "soc");
+	if (!soc)
+		return 0;
+
+	if (of_property_read_u32(soc, "bus-frequency", &sysfreq))
+		sysfreq = 0;
 
-#ifndef CONFIG_SMP
-static inline const struct cpumask *cpu_core_mask(int cpu)
+	of_node_put(soc);
+
+	return sysfreq;
+}
+
+static struct device_node *cpu_to_clk_node(int cpu)
 {
-	return cpumask_of(0);
+	struct device_node *np, *clk_np;
+
+	if (!cpu_present(cpu))
+		return NULL;
+
+	np = of_get_cpu_node(cpu, NULL);
+	if (!np)
+		return NULL;
+
+	clk_np = of_parse_phandle(np, "clocks", 0);
+	if (!clk_np)
+		return NULL;
+
+	of_node_put(np);
+
+	return clk_np;
+}
+
+/* traverse cpu nodes to get cpu mask of sharing clock wire */
+static void set_affected_cpus(struct cpufreq_policy *policy)
+{
+	struct device_node *np, *clk_np;
+	struct cpumask *dstp = policy->cpus;
+	int i;
+
+	np = cpu_to_clk_node(policy->cpu);
+	if (!np)
+		return;
+
+	for_each_present_cpu(i) {
+		clk_np = cpu_to_clk_node(i);
+		if (!clk_np)
+			continue;
+
+		if (clk_np == np)
+			cpumask_set_cpu(i, dstp);
+
+		of_node_put(clk_np);
+	}
+	of_node_put(np);
 }
-#endif
 
 /* reduce the duplicated frequencies in frequency table */
 static void freq_table_redup(struct cpufreq_frequency_table *freq_table,
@@ -107,6 +169,7 @@ static void freq_table_sort(struct cpufreq_frequency_table *freq_table,
 	int i, j, ind;
 	unsigned int freq, max_freq;
 	struct cpufreq_frequency_table table;
+
 	for (i = 0; i < count - 1; i++) {
 		max_freq = freq_table[i].frequency;
 		ind = i;
@@ -131,7 +194,7 @@ static void freq_table_sort(struct cpufreq_frequency_table *freq_table,
 	}
 }
 
-static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
+static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
 	struct device_node *np;
 	int i, count, ret;
@@ -147,10 +210,8 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		return -ENODEV;
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (!data) {
-		pr_err("%s: no memory\n", __func__);
+	if (!data)
 		goto err_np;
-	}
 
 	policy->clk = of_clk_get(np, 0);
 	if (IS_ERR(policy->clk)) {
@@ -172,7 +233,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	}
 
 	if (fmask)
-		mask = fmask[get_hard_smp_processor_id(cpu)];
+		mask = fmask[get_cpu_physical_id(cpu)];
 	else
 		mask = 0x0;
 
@@ -203,13 +264,12 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	data->table = table;
 
 	/* update ->cpus if we have cluster, no harm if not */
-	cpumask_copy(policy->cpus, per_cpu(cpu_mask, cpu));
-	for_each_cpu(i, per_cpu(cpu_mask, cpu))
-		per_cpu(cpu_data, i) = data;
+	set_affected_cpus(policy);
+	policy->driver_data = data;
 
 	/* Minimum transition latency is 12 platform clocks */
 	u64temp = 12ULL * NSEC_PER_SEC;
-	do_div(u64temp, fsl_get_sys_freq());
+	do_div(u64temp, get_bus_freq());
 	policy->cpuinfo.transition_latency = u64temp + 1;
 
 	of_node_put(np);
@@ -221,7 +281,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
 err_node:
 	of_node_put(data->parent);
 err_nomem2:
-	per_cpu(cpu_data, cpu) = NULL;
+	policy->driver_data = NULL;
 	kfree(data);
 err_np:
 	of_node_put(np);
@@ -229,43 +289,40 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	return -ENODEV;
 }
 
-static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static int __exit qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-	struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
-	unsigned int cpu;
+	struct cpu_data *data = policy->driver_data;
 
 	of_node_put(data->parent);
 	kfree(data->table);
 	kfree(data);
-
-	for_each_cpu(cpu, per_cpu(cpu_mask, policy->cpu))
-		per_cpu(cpu_data, cpu) = NULL;
+	policy->driver_data = NULL;
 
 	return 0;
 }
 
-static int corenet_cpufreq_target(struct cpufreq_policy *policy,
+static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
 		unsigned int index)
 {
 	struct clk *parent;
-	struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
+	struct cpu_data *data = policy->driver_data;
 
 	parent = of_clk_get(data->parent, data->table[index].driver_data);
 	return clk_set_parent(policy->clk, parent);
 }
 
-static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
-	.name		= "ppc_cpufreq",
+static struct cpufreq_driver qoriq_cpufreq_driver = {
+	.name		= "qoriq_cpufreq",
 	.flags		= CPUFREQ_CONST_LOOPS,
-	.init		= corenet_cpufreq_cpu_init,
-	.exit		= __exit_p(corenet_cpufreq_cpu_exit),
+	.init		= qoriq_cpufreq_cpu_init,
+	.exit		= __exit_p(qoriq_cpufreq_cpu_exit),
 	.verify		= cpufreq_generic_frequency_table_verify,
-	.target_index	= corenet_cpufreq_target,
+	.target_index	= qoriq_cpufreq_target,
 	.get		= cpufreq_generic_get,
 	.attr		= cpufreq_generic_attr,
 };
 
-static const struct of_device_id node_matches[] __initdata = {
+static const struct of_device_id node_matches[] __initconst = {
 	{ .compatible = "fsl,p2041-clockgen", .data = &sdata[0], },
 	{ .compatible = "fsl,p3041-clockgen", .data = &sdata[0], },
 	{ .compatible = "fsl,p5020-clockgen", .data = &sdata[1], },
@@ -275,61 +332,43 @@ static const struct of_device_id node_matches[] __initdata = {
 	{}
 };
 
-static int __init ppc_corenet_cpufreq_init(void)
+static int __init qoriq_cpufreq_init(void)
 {
 	int ret;
 	struct device_node  *np;
 	const struct of_device_id *match;
 	const struct soc_data *data;
-	unsigned int cpu;
 
 	np = of_find_matching_node(NULL, node_matches);
 	if (!np)
 		return -ENODEV;
 
-	for_each_possible_cpu(cpu) {
-		if (!alloc_cpumask_var(&per_cpu(cpu_mask, cpu), GFP_KERNEL))
-			goto err_mask;
-		cpumask_copy(per_cpu(cpu_mask, cpu), cpu_core_mask(cpu));
-	}
-
 	match = of_match_node(node_matches, np);
 	data = match->data;
 	if (data) {
 		if (data->flag)
 			fmask = data->freq_mask;
-		min_cpufreq = fsl_get_sys_freq();
+		min_cpufreq = get_bus_freq();
 	} else {
-		min_cpufreq = fsl_get_sys_freq() / 2;
+		min_cpufreq = get_bus_freq() / 2;
 	}
 
 	of_node_put(np);
 
-	ret = cpufreq_register_driver(&ppc_corenet_cpufreq_driver);
+	ret = cpufreq_register_driver(&qoriq_cpufreq_driver);
 	if (!ret)
-		pr_info("Freescale PowerPC corenet CPU frequency scaling driver\n");
+		pr_info("Freescale QorIQ CPU frequency scaling driver\n");
 
 	return ret;
-
-err_mask:
-	for_each_possible_cpu(cpu)
-		free_cpumask_var(per_cpu(cpu_mask, cpu));
-
-	return -ENOMEM;
 }
-module_init(ppc_corenet_cpufreq_init);
+module_init(qoriq_cpufreq_init);
 
-static void __exit ppc_corenet_cpufreq_exit(void)
+static void __exit qoriq_cpufreq_exit(void)
 {
-	unsigned int cpu;
-
-	for_each_possible_cpu(cpu)
-		free_cpumask_var(per_cpu(cpu_mask, cpu));
-
-	cpufreq_unregister_driver(&ppc_corenet_cpufreq_driver);
+	cpufreq_unregister_driver(&qoriq_cpufreq_driver);
 }
-module_exit(ppc_corenet_cpufreq_exit);
+module_exit(qoriq_cpufreq_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Tang Yuantian <Yuantian.Tang@freescale.com>");
-MODULE_DESCRIPTION("cpufreq driver for Freescale e500mc series SoCs");
+MODULE_DESCRIPTION("cpufreq driver for Freescale QorIQ series SoCs");

+ 1 - 6
drivers/cpuidle/Kconfig

@@ -29,15 +29,10 @@ config DT_IDLE_STATES
 	bool
 
 menu "ARM CPU Idle Drivers"
-depends on ARM
+depends on ARM || ARM64
 source "drivers/cpuidle/Kconfig.arm"
 endmenu
 
-menu "ARM64 CPU Idle Drivers"
-depends on ARM64
-source "drivers/cpuidle/Kconfig.arm64"
-endmenu
-
 menu "MIPS CPU Idle Drivers"
 depends on MIPS
 source "drivers/cpuidle/Kconfig.mips"

+ 19 - 9
drivers/cpuidle/Kconfig.arm

@@ -1,10 +1,20 @@
 #
 # ARM CPU Idle drivers
 #
+config ARM_CPUIDLE
+        bool "Generic ARM/ARM64 CPU idle Driver"
+        select DT_IDLE_STATES
+        help
+          Select this to enable generic cpuidle driver for ARM.
+          It provides a generic idle driver whose idle states are configured
+          at run-time through DT nodes. The CPUidle suspend backend is
+          initialized by calling the CPU operations init idle hook
+          provided by architecture code.
+
 config ARM_BIG_LITTLE_CPUIDLE
 	bool "Support for ARM big.LITTLE processors"
 	depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS
-	depends on MCPM
+	depends on MCPM && !ARM64
 	select ARM_CPU_SUSPEND
 	select CPU_IDLE_MULTIPLE_DRIVERS
 	select DT_IDLE_STATES
@@ -16,51 +26,51 @@ config ARM_BIG_LITTLE_CPUIDLE
 
 config ARM_CLPS711X_CPUIDLE
 	bool "CPU Idle Driver for CLPS711X processors"
-	depends on ARCH_CLPS711X || COMPILE_TEST
+	depends on ARCH_CLPS711X && !ARM64 || COMPILE_TEST
 	help
 	  Select this to enable cpuidle on Cirrus Logic CLPS711X SOCs.
 
 config ARM_HIGHBANK_CPUIDLE
 	bool "CPU Idle Driver for Calxeda processors"
-	depends on ARM_PSCI
+	depends on ARM_PSCI && !ARM64
 	select ARM_CPU_SUSPEND
 	help
 	  Select this to enable cpuidle on Calxeda processors.
 
 config ARM_KIRKWOOD_CPUIDLE
 	bool "CPU Idle Driver for Marvell Kirkwood SoCs"
-	depends on MACH_KIRKWOOD
+	depends on MACH_KIRKWOOD && !ARM64
 	help
 	  This adds the CPU Idle driver for Marvell Kirkwood SoCs.
 
 config ARM_ZYNQ_CPUIDLE
 	bool "CPU Idle Driver for Xilinx Zynq processors"
-	depends on ARCH_ZYNQ
+	depends on ARCH_ZYNQ && !ARM64
 	help
 	  Select this to enable cpuidle on Xilinx Zynq processors.
 
 config ARM_U8500_CPUIDLE
 	bool "Cpu Idle Driver for the ST-E u8500 processors"
-	depends on ARCH_U8500
+	depends on ARCH_U8500 && !ARM64
 	help
 	  Select this to enable cpuidle for ST-E u8500 processors
 
 config ARM_AT91_CPUIDLE
 	bool "Cpu Idle Driver for the AT91 processors"
 	default y
-	depends on ARCH_AT91
+	depends on ARCH_AT91 && !ARM64
 	help
 	  Select this to enable cpuidle for AT91 processors
 
 config ARM_EXYNOS_CPUIDLE
 	bool "Cpu Idle Driver for the Exynos processors"
-	depends on ARCH_EXYNOS
+	depends on ARCH_EXYNOS && !ARM64
 	select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
 	help
 	  Select this to enable cpuidle for Exynos processors
 
 config ARM_MVEBU_V7_CPUIDLE
 	bool "CPU Idle Driver for mvebu v7 family processors"
-	depends on ARCH_MVEBU
+	depends on ARCH_MVEBU && !ARM64
 	help
 	  Select this to enable cpuidle on Armada 370, 38x and XP processors.

+ 0 - 13
drivers/cpuidle/Kconfig.arm64

@@ -1,13 +0,0 @@
-#
-# ARM64 CPU Idle drivers
-#
-
-config ARM64_CPUIDLE
-	bool "Generic ARM64 CPU idle Driver"
-	select DT_IDLE_STATES
-	help
-	  Select this to enable generic cpuidle driver for ARM64.
-	  It provides a generic idle driver whose idle states are configured
-	  at run-time through DT nodes. The CPUidle suspend backend is
-	  initialized by calling the CPU operations init idle hook
-	  provided by architecture code.

+ 1 - 4
drivers/cpuidle/Makefile

@@ -17,15 +17,12 @@ obj-$(CONFIG_ARM_ZYNQ_CPUIDLE)		+= cpuidle-zynq.o
 obj-$(CONFIG_ARM_U8500_CPUIDLE)         += cpuidle-ux500.o
 obj-$(CONFIG_ARM_AT91_CPUIDLE)          += cpuidle-at91.o
 obj-$(CONFIG_ARM_EXYNOS_CPUIDLE)        += cpuidle-exynos.o
+obj-$(CONFIG_ARM_CPUIDLE)		+= cpuidle-arm.o
 
 ###############################################################################
 # MIPS drivers
 obj-$(CONFIG_MIPS_CPS_CPUIDLE)		+= cpuidle-cps.o
 
-###############################################################################
-# ARM64 drivers
-obj-$(CONFIG_ARM64_CPUIDLE)		+= cpuidle-arm64.o
-
 ###############################################################################
 # POWERPC drivers
 obj-$(CONFIG_PSERIES_CPUIDLE)		+= cpuidle-pseries.o

+ 62 - 21
drivers/cpuidle/cpuidle-arm64.c → drivers/cpuidle/cpuidle-arm.c

@@ -1,5 +1,5 @@
 /*
- * ARM64 generic CPU idle driver.
+ * ARM/ARM64 generic CPU idle driver.
  *
  * Copyright (C) 2014 ARM Ltd.
  * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
@@ -9,7 +9,7 @@
  * published by the Free Software Foundation.
  */
 
-#define pr_fmt(fmt) "CPUidle arm64: " fmt
+#define pr_fmt(fmt) "CPUidle arm: " fmt
 
 #include <linux/cpuidle.h>
 #include <linux/cpumask.h>
@@ -17,13 +17,14 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/slab.h>
 
 #include <asm/cpuidle.h>
 
 #include "dt_idle_states.h"
 
 /*
- * arm64_enter_idle_state - Programs CPU to enter the specified state
+ * arm_enter_idle_state - Programs CPU to enter the specified state
  *
  * dev: cpuidle device
  * drv: cpuidle driver
@@ -32,8 +33,8 @@
  * Called from the CPUidle framework to program the device to the
  * specified target state selected by the governor.
  */
-static int arm64_enter_idle_state(struct cpuidle_device *dev,
-				  struct cpuidle_driver *drv, int idx)
+static int arm_enter_idle_state(struct cpuidle_device *dev,
+				struct cpuidle_driver *drv, int idx)
 {
 	int ret;
 
@@ -49,7 +50,7 @@ static int arm64_enter_idle_state(struct cpuidle_device *dev,
 		 * call the CPU ops suspend protocol with idle index as a
 		 * parameter.
 		 */
-		ret = cpu_suspend(idx);
+		arm_cpuidle_suspend(idx);
 
 		cpu_pm_exit();
 	}
@@ -57,8 +58,8 @@ static int arm64_enter_idle_state(struct cpuidle_device *dev,
 	return ret ? -1 : idx;
 }
 
-static struct cpuidle_driver arm64_idle_driver = {
-	.name = "arm64_idle",
+static struct cpuidle_driver arm_idle_driver = {
+	.name = "arm_idle",
 	.owner = THIS_MODULE,
 	/*
 	 * State at index 0 is standby wfi and considered standard
@@ -68,32 +69,33 @@ static struct cpuidle_driver arm64_idle_driver = {
 	 * handler for idle state index 0.
 	 */
 	.states[0] = {
-		.enter                  = arm64_enter_idle_state,
+		.enter                  = arm_enter_idle_state,
 		.exit_latency           = 1,
 		.target_residency       = 1,
 		.power_usage		= UINT_MAX,
 		.name                   = "WFI",
-		.desc                   = "ARM64 WFI",
+		.desc                   = "ARM WFI",
 	}
 };
 
-static const struct of_device_id arm64_idle_state_match[] __initconst = {
+static const struct of_device_id arm_idle_state_match[] __initconst = {
 	{ .compatible = "arm,idle-state",
-	  .data = arm64_enter_idle_state },
+	  .data = arm_enter_idle_state },
 	{ },
 };
 
 /*
- * arm64_idle_init
+ * arm_idle_init
  *
- * Registers the arm64 specific cpuidle driver with the cpuidle
+ * Registers the arm specific cpuidle driver with the cpuidle
  * framework. It relies on core code to parse the idle states
  * and initialize them using driver data structures accordingly.
  */
-static int __init arm64_idle_init(void)
+static int __init arm_idle_init(void)
 {
 	int cpu, ret;
-	struct cpuidle_driver *drv = &arm64_idle_driver;
+	struct cpuidle_driver *drv = &arm_idle_driver;
+	struct cpuidle_device *dev;
 
 	/*
 	 * Initialize idle states data, starting at index 1.
@@ -101,22 +103,61 @@ static int __init arm64_idle_init(void)
 	 * let the driver initialization fail accordingly since there is no
 	 * reason to initialize the idle driver if only wfi is supported.
 	 */
-	ret = dt_init_idle_driver(drv, arm64_idle_state_match, 1);
+	ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
 	if (ret <= 0)
 		return ret ? : -ENODEV;
 
+	ret = cpuidle_register_driver(drv);
+	if (ret) {
+		pr_err("Failed to register cpuidle driver\n");
+		return ret;
+	}
+
 	/*
 	 * Call arch CPU operations in order to initialize
 	 * idle states suspend back-end specific data
 	 */
 	for_each_possible_cpu(cpu) {
-		ret = cpu_init_idle(cpu);
+		ret = arm_cpuidle_init(cpu);
+
+		/*
+		 * Skip the cpuidle device initialization if the reported
+		 * failure is a HW misconfiguration/breakage (-ENXIO).
+		 */
+		if (ret == -ENXIO)
+			continue;
+
 		if (ret) {
 			pr_err("CPU %d failed to init idle CPU ops\n", cpu);
-			return ret;
+			goto out_fail;
+		}
+
+		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+		if (!dev) {
+			pr_err("Failed to allocate cpuidle device\n");
+			goto out_fail;
+		}
+		dev->cpu = cpu;
+
+		ret = cpuidle_register_device(dev);
+		if (ret) {
+			pr_err("Failed to register cpuidle device for CPU %d\n",
+			       cpu);
+			kfree(dev);
+			goto out_fail;
 		}
 	}
 
-	return cpuidle_register(drv, NULL);
+	return 0;
+out_fail:
+	while (--cpu >= 0) {
+		dev = per_cpu(cpuidle_devices, cpu);
+		cpuidle_unregister_device(dev);
+		kfree(dev);
+	}
+
+	cpuidle_unregister_driver(drv);
+
+	return ret;
 }
-device_initcall(arm64_idle_init);
+device_initcall(arm_idle_init);

+ 0 - 1
drivers/cpuidle/cpuidle-at91.c

@@ -19,7 +19,6 @@
 #include <linux/cpuidle.h>
 #include <linux/io.h>
 #include <linux/export.h>
-#include <asm/proc-fns.h>
 #include <asm/cpuidle.h>
 
 #define AT91_MAX_STATES	2

+ 0 - 1
drivers/cpuidle/cpuidle-exynos.c

@@ -19,7 +19,6 @@
 #include <linux/of.h>
 #include <linux/platform_data/cpuidle-exynos.h>
 
-#include <asm/proc-fns.h>
 #include <asm/suspend.h>
 #include <asm/cpuidle.h>
 

+ 0 - 1
drivers/cpuidle/cpuidle-kirkwood.c

@@ -21,7 +21,6 @@
 #include <linux/cpuidle.h>
 #include <linux/io.h>
 #include <linux/export.h>
-#include <asm/proc-fns.h>
 #include <asm/cpuidle.h>
 
 #define KIRKWOOD_MAX_STATES	2

+ 0 - 1
drivers/cpuidle/cpuidle-ux500.c

@@ -19,7 +19,6 @@
 #include <linux/platform_device.h>
 
 #include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
 
 static atomic_t master = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(master_lock);

+ 0 - 1
drivers/cpuidle/cpuidle-zynq.c

@@ -28,7 +28,6 @@
 #include <linux/init.h>
 #include <linux/cpuidle.h>
 #include <linux/platform_device.h>
-#include <asm/proc-fns.h>
 #include <asm/cpuidle.h>
 
 #define ZYNQ_MAX_STATES		2

+ 56 - 12
drivers/idle/intel_idle.c

@@ -217,19 +217,11 @@ static struct cpuidle_state byt_cstates[] = {
 		.target_residency = 1,
 		.enter = &intel_idle,
 		.enter_freeze = intel_idle_freeze, },
-	{
-		.name = "C1E-BYT",
-		.desc = "MWAIT 0x01",
-		.flags = MWAIT2flg(0x01),
-		.exit_latency = 15,
-		.target_residency = 30,
-		.enter = &intel_idle,
-		.enter_freeze = intel_idle_freeze, },
 	{
 		.name = "C6N-BYT",
 		.desc = "MWAIT 0x58",
 		.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
-		.exit_latency = 40,
+		.exit_latency = 300,
 		.target_residency = 275,
 		.enter = &intel_idle,
 		.enter_freeze = intel_idle_freeze, },
@@ -237,7 +229,7 @@ static struct cpuidle_state byt_cstates[] = {
 		.name = "C6S-BYT",
 		.desc = "MWAIT 0x52",
 		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
-		.exit_latency = 140,
+		.exit_latency = 500,
 		.target_residency = 560,
 		.enter = &intel_idle,
 		.enter_freeze = intel_idle_freeze, },
@@ -246,7 +238,7 @@ static struct cpuidle_state byt_cstates[] = {
 		.desc = "MWAIT 0x60",
 		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 1200,
-		.target_residency = 1500,
+		.target_residency = 4000,
 		.enter = &intel_idle,
 		.enter_freeze = intel_idle_freeze, },
 	{
@@ -261,6 +253,51 @@ static struct cpuidle_state byt_cstates[] = {
 		.enter = NULL }
 };
 
+static struct cpuidle_state cht_cstates[] = {
+	{
+		.name = "C1-CHT",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00),
+		.exit_latency = 1,
+		.target_residency = 1,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C6N-CHT",
+		.desc = "MWAIT 0x58",
+		.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 80,
+		.target_residency = 275,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C6S-CHT",
+		.desc = "MWAIT 0x52",
+		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 200,
+		.target_residency = 560,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C7-CHT",
+		.desc = "MWAIT 0x60",
+		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 1200,
+		.target_residency = 4000,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C7S-CHT",
+		.desc = "MWAIT 0x64",
+		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 10000,
+		.target_residency = 20000,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.enter = NULL }
+};
+
 static struct cpuidle_state ivb_cstates[] = {
 	{
 		.name = "C1-IVB",
@@ -748,6 +785,12 @@ static const struct idle_cpu idle_cpu_byt = {
 	.byt_auto_demotion_disable_flag = true,
 };
 
+static const struct idle_cpu idle_cpu_cht = {
+	.state_table = cht_cstates,
+	.disable_promotion_to_c1e = true,
+	.byt_auto_demotion_disable_flag = true,
+};
+
 static const struct idle_cpu idle_cpu_ivb = {
 	.state_table = ivb_cstates,
 	.disable_promotion_to_c1e = true,
@@ -776,7 +819,7 @@ static const struct idle_cpu idle_cpu_avn = {
 #define ICPU(model, cpu) \
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
 
-static const struct x86_cpu_id intel_idle_ids[] = {
+static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	ICPU(0x1a, idle_cpu_nehalem),
 	ICPU(0x1e, idle_cpu_nehalem),
 	ICPU(0x1f, idle_cpu_nehalem),
@@ -790,6 +833,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
 	ICPU(0x2d, idle_cpu_snb),
 	ICPU(0x36, idle_cpu_atom),
 	ICPU(0x37, idle_cpu_byt),
+	ICPU(0x4c, idle_cpu_cht),
 	ICPU(0x3a, idle_cpu_ivb),
 	ICPU(0x3e, idle_cpu_ivt),
 	ICPU(0x3c, idle_cpu_hsw),
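
The ICPU() entries match on the x86 display model from CPUID leaf 1 — 0x37 is the existing Baytrail (Silvermont) entry, 0x4c the Airmont (Cherrytrail/Braswell) entry added by this merge. A quick user-space check of which table a machine would pick (GCC/Clang on x86):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, model;

	__get_cpuid(1, &eax, &ebx, &ecx, &edx);
	/* display model = (extended model << 4) | model, for family 6 */
	model = ((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0);
	printf("x86 model 0x%02x\n", model);
	return 0;
}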

+ 2 - 0
include/asm-generic/vmlinux.lds.h

@@ -167,6 +167,7 @@
 #define IOMMU_OF_TABLES()	OF_TABLE(CONFIG_OF_IOMMU, iommu)
 #define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
 #define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
+#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
 #define EARLYCON_OF_TABLES()	OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
 
 #define KERNEL_DTB()							\
@@ -501,6 +502,7 @@
 	CLKSRC_OF_TABLES()						\
 	IOMMU_OF_TABLES()						\
 	CPU_METHOD_OF_TABLES()						\
+	CPUIDLE_METHOD_OF_TABLES()					\
 	KERNEL_DTB()							\
 	IRQCHIP_OF_MATCH_TABLE()					\
 	EARLYCON_OF_TABLES()
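
Taken together, this is the usual linker-table pattern: CPUIDLE_METHOD_OF_DECLARE() in asm/cpuidle.h drops each entry into the __cpuidle_method_of_table section, the sentinel in arch/arm/kernel/cpuidle.c terminates the array, and the OF_TABLE() hook added here makes the linker collect the entries contiguously. A minimal user-space analogue of the same idea (it relies on the __start_/__stop_ symbols GNU ld emits for C-identifier section names instead of the kernel's explicit linker script and sentinel; names are illustrative):

#include <stdio.h>

struct method { const char *name; };

#define DECLARE_METHOD(id, n)						\
	static const struct method __method_##id			\
	__attribute__((used, section("methods"))) = { .name = n }

DECLARE_METHOD(a, "vendor,method-a");
DECLARE_METHOD(b, "vendor,method-b");

/* Array bounds provided by the linker for the custom section. */
extern const struct method __start_methods[], __stop_methods[];

int main(void)
{
	const struct method *m;

	for (m = __start_methods; m < __stop_methods; m++)
		printf("%s\n", m->name);
	return 0;
}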