@@ -49,40 +49,45 @@
  */
 
 /**
- * struct power_table - frequency to power conversion
+ * struct freq_table - frequency table along with power entries
  * @frequency: frequency in KHz
  * @power: power in mW
  *
  * This structure is built when the cooling device registers and helps
- * in translating frequency to power and viceversa.
+ * in translating frequency to power and vice versa.
  */
-struct power_table {
+struct freq_table {
 	u32 frequency;
 	u32 power;
 };
 
+/**
+ * struct time_in_idle - Idle time stats
+ * @time: previous reading of the absolute time that this cpu was idle
+ * @timestamp: wall time of the last invocation of get_cpu_idle_time_us()
+ */
+struct time_in_idle {
+	u64 time;
+	u64 timestamp;
+};
+
 /**
  * struct cpufreq_cooling_device - data for cooling device with cpufreq
  * @id: unique integer value corresponding to each cpufreq_cooling_device
  *	registered.
- * @cool_dev: thermal_cooling_device pointer to keep track of the
- *	registered cooling device.
+ * @last_load: load measured by the latest call to cpufreq_get_requested_power()
  * @cpufreq_state: integer value representing the current state of cpufreq
  *	cooling devices.
  * @clipped_freq: integer value representing the absolute value of the clipped
  *	frequency.
  * @max_level: maximum cooling level. One less than total number of valid
  *	cpufreq frequencies.
- * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
+ * @freq_table: Freq table in descending order of frequencies
+ * @cdev: thermal_cooling_device pointer to keep track of the
+ *	registered cooling device.
+ * @policy: cpufreq policy.
  * @node: list_head to link all cpufreq_cooling_device together.
- * @last_load: load measured by the latest call to cpufreq_get_requested_power()
- * @time_in_idle: previous reading of the absolute time that this cpu was idle
- * @time_in_idle_timestamp: wall time of the last invocation of
- *	get_cpu_idle_time_us()
- * @dyn_power_table: array of struct power_table for frequency to power
- *	conversion, sorted in ascending order.
- * @dyn_power_table_entries: number of entries in the @dyn_power_table array
- * @cpu_dev: the first cpu_device from @allowed_cpus that has OPPs registered
+ * @idle_time: idle time stats
  * @plat_get_static_power: callback to calculate the static power
  *
  * This structure is required for keeping information of each registered
@@ -90,80 +95,44 @@ struct power_table {
  */
 struct cpufreq_cooling_device {
 	int id;
-	struct thermal_cooling_device *cool_dev;
+	u32 last_load;
 	unsigned int cpufreq_state;
 	unsigned int clipped_freq;
 	unsigned int max_level;
-	unsigned int *freq_table;	/* In descending order */
-	struct cpumask allowed_cpus;
+	struct freq_table *freq_table;	/* In descending order */
+	struct thermal_cooling_device *cdev;
+	struct cpufreq_policy *policy;
 	struct list_head node;
-	u32 last_load;
-	u64 *time_in_idle;
-	u64 *time_in_idle_timestamp;
-	struct power_table *dyn_power_table;
-	int dyn_power_table_entries;
-	struct device *cpu_dev;
+	struct time_in_idle *idle_time;
 	get_static_t plat_get_static_power;
 };
-static DEFINE_IDA(cpufreq_ida);
 
+static DEFINE_IDA(cpufreq_ida);
 static DEFINE_MUTEX(cooling_list_lock);
-static LIST_HEAD(cpufreq_dev_list);
+static LIST_HEAD(cpufreq_cdev_list);
 
 /* Below code defines functions to be used for cpufreq as cooling device */
 
 /**
  * get_level: Find the level for a particular frequency
- * @cpufreq_dev: cpufreq_dev for which the property is required
+ * @cpufreq_cdev: cpufreq_cdev for which the property is required
  * @freq: Frequency
  *
- * Return: level on success, THERMAL_CSTATE_INVALID on error.
+ * Return: level corresponding to the frequency.
  */
-static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_dev,
+static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
 			       unsigned int freq)
 {
+	struct freq_table *freq_table = cpufreq_cdev->freq_table;
 	unsigned long level;
 
-	for (level = 0; level <= cpufreq_dev->max_level; level++) {
-		if (freq == cpufreq_dev->freq_table[level])
-			return level;
-
-		if (freq > cpufreq_dev->freq_table[level])
+	for (level = 1; level <= cpufreq_cdev->max_level; level++)
+		if (freq > freq_table[level].frequency)
 			break;
-	}
 
-	return THERMAL_CSTATE_INVALID;
+	return level - 1;
 }
 
-/**
- * cpufreq_cooling_get_level - for a given cpu, return the cooling level.
- * @cpu: cpu for which the level is required
- * @freq: the frequency of interest
- *
- * This function will match the cooling level corresponding to the
- * requested @freq and return it.
- *
- * Return: The matched cooling level on success or THERMAL_CSTATE_INVALID
- * otherwise.
- */
-unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
-{
-	struct cpufreq_cooling_device *cpufreq_dev;
-
-	mutex_lock(&cooling_list_lock);
-	list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
-		if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
-			mutex_unlock(&cooling_list_lock);
-			return get_level(cpufreq_dev, freq);
-		}
-	}
-	mutex_unlock(&cooling_list_lock);
-
-	pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
-	return THERMAL_CSTATE_INVALID;
-}
-EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
-
 /**
  * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
  * @nb: struct notifier_block * with callback info.
@@ -181,14 +150,18 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
 {
 	struct cpufreq_policy *policy = data;
 	unsigned long clipped_freq;
-	struct cpufreq_cooling_device *cpufreq_dev;
+	struct cpufreq_cooling_device *cpufreq_cdev;
 
 	if (event != CPUFREQ_ADJUST)
 		return NOTIFY_DONE;
 
 	mutex_lock(&cooling_list_lock);
-	list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
-		if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
+	list_for_each_entry(cpufreq_cdev, &cpufreq_cdev_list, node) {
+		/*
+		 * A new copy of the policy is sent to the notifier and can't
+		 * compare that directly.
+		 */
+		if (policy->cpu != cpufreq_cdev->policy->cpu)
 			continue;
 
 		/*
@@ -202,7 +175,7 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
 		 * But, if clipped_freq is greater than policy->max, we don't
 		 * need to do anything.
 		 */
-		clipped_freq = cpufreq_dev->clipped_freq;
+		clipped_freq = cpufreq_cdev->clipped_freq;
 
 		if (policy->max > clipped_freq)
 			cpufreq_verify_within_limits(policy, 0, clipped_freq);
@@ -214,63 +187,63 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
 }
 
 /**
- * build_dyn_power_table() - create a dynamic power to frequency table
- * @cpufreq_device: the cpufreq cooling device in which to store the table
+ * update_freq_table() - Update the freq table with power numbers
+ * @cpufreq_cdev: the cpufreq cooling device in which to update the table
  * @capacitance: dynamic power coefficient for these cpus
  *
- * Build a dynamic power to frequency table for this cpu and store it
- * in @cpufreq_device. This table will be used in cpu_power_to_freq() and
- * cpu_freq_to_power() to convert between power and frequency
- * efficiently. Power is stored in mW, frequency in KHz. The
- * resulting table is in ascending order.
+ * Update the freq table with power numbers. This table will be used in
+ * cpu_power_to_freq() and cpu_freq_to_power() to convert between power and
+ * frequency efficiently. Power is stored in mW, frequency in KHz. The
+ * resulting table is in descending order.
 *
 * Return: 0 on success, -EINVAL if there are no OPPs for any CPUs,
- * -ENOMEM if we run out of memory or -EAGAIN if an OPP was
- * added/enabled while the function was executing.
+ * or -ENOMEM if we run out of memory.
  */
-static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
-				 u32 capacitance)
+static int update_freq_table(struct cpufreq_cooling_device *cpufreq_cdev,
+			     u32 capacitance)
 {
-	struct power_table *power_table;
+	struct freq_table *freq_table = cpufreq_cdev->freq_table;
 	struct dev_pm_opp *opp;
 	struct device *dev = NULL;
-	int num_opps = 0, cpu, i, ret = 0;
-	unsigned long freq;
-
-	for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
-		dev = get_cpu_device(cpu);
-		if (!dev) {
-			dev_warn(&cpufreq_device->cool_dev->device,
-				 "No cpu device for cpu %d\n", cpu);
-			continue;
-		}
+	int num_opps = 0, cpu = cpufreq_cdev->policy->cpu, i;
 
-		num_opps = dev_pm_opp_get_opp_count(dev);
-		if (num_opps > 0)
-			break;
-		else if (num_opps < 0)
-			return num_opps;
+	dev = get_cpu_device(cpu);
+	if (unlikely(!dev)) {
+		dev_warn(&cpufreq_cdev->cdev->device,
+			 "No cpu device for cpu %d\n", cpu);
+		return -ENODEV;
 	}
 
-	if (num_opps == 0)
-		return -EINVAL;
+	num_opps = dev_pm_opp_get_opp_count(dev);
+	if (num_opps < 0)
+		return num_opps;
 
-	power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL);
-	if (!power_table)
-		return -ENOMEM;
+	/*
+	 * The cpufreq table is also built from the OPP table and so the count
+	 * should match.
+	 */
+	if (num_opps != cpufreq_cdev->max_level + 1) {
+		dev_warn(dev, "Number of OPPs not matching with max_levels\n");
+		return -EINVAL;
+	}
 
-	for (freq = 0, i = 0;
-	     opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp);
-	     freq++, i++) {
-		u32 freq_mhz, voltage_mv;
+	for (i = 0; i <= cpufreq_cdev->max_level; i++) {
+		unsigned long freq = freq_table[i].frequency * 1000;
+		u32 freq_mhz = freq_table[i].frequency / 1000;
 		u64 power;
+		u32 voltage_mv;
 
-		if (i >= num_opps) {
-			ret = -EAGAIN;
-			goto free_power_table;
+		/*
+		 * Find ceil frequency as 'freq' may be slightly lower than OPP
+		 * freq due to truncation while converting to kHz.
+		 */
+		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+		if (IS_ERR(opp)) {
+			dev_err(dev, "failed to get opp for %lu frequency\n",
+				freq);
+			return -EINVAL;
 		}
 
-		freq_mhz = freq / 1000000;
 		voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
 		dev_pm_opp_put(opp);
 
@@ -281,89 +254,73 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
 		power = (u64)capacitance * freq_mhz * voltage_mv * voltage_mv;
 		do_div(power, 1000000000);
 
-		/* frequency is stored in power_table in KHz */
-		power_table[i].frequency = freq / 1000;
-
 		/* power is stored in mW */
-		power_table[i].power = power;
+		freq_table[i].power = power;
 	}
 
-	if (i != num_opps) {
-		ret = PTR_ERR(opp);
-		goto free_power_table;
-	}
-
-	cpufreq_device->cpu_dev = dev;
-	cpufreq_device->dyn_power_table = power_table;
-	cpufreq_device->dyn_power_table_entries = i;
-
 	return 0;
-
-free_power_table:
-	kfree(power_table);
-
-	return ret;
 }
 
-static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_device,
+static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
 			     u32 freq)
 {
 	int i;
-	struct power_table *pt = cpufreq_device->dyn_power_table;
+	struct freq_table *freq_table = cpufreq_cdev->freq_table;
 
-	for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++)
-		if (freq < pt[i].frequency)
+	for (i = 1; i <= cpufreq_cdev->max_level; i++)
+		if (freq > freq_table[i].frequency)
 			break;
 
-	return pt[i - 1].power;
+	return freq_table[i - 1].power;
 }
 
-static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device,
+static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
 			     u32 power)
 {
 	int i;
-	struct power_table *pt = cpufreq_device->dyn_power_table;
+	struct freq_table *freq_table = cpufreq_cdev->freq_table;
 
-	for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++)
-		if (power < pt[i].power)
+	for (i = 1; i <= cpufreq_cdev->max_level; i++)
+		if (power > freq_table[i].power)
 			break;
 
-	return pt[i - 1].frequency;
+	return freq_table[i - 1].frequency;
 }
 
 /**
  * get_load() - get load for a cpu since last updated
- * @cpufreq_device: &struct cpufreq_cooling_device for this cpu
+ * @cpufreq_cdev: &struct cpufreq_cooling_device for this cpu
  * @cpu: cpu number
- * @cpu_idx: index of the cpu in cpufreq_device->allowed_cpus
+ * @cpu_idx: index of the cpu in time_in_idle*
 *
 * Return: The average load of cpu @cpu in percentage since this
 * function was last called.
 */
-static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu,
+static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
 		    int cpu_idx)
 {
 	u32 load;
 	u64 now, now_idle, delta_time, delta_idle;
+	struct time_in_idle *idle_time = &cpufreq_cdev->idle_time[cpu_idx];
 
 	now_idle = get_cpu_idle_time(cpu, &now, 0);
-	delta_idle = now_idle - cpufreq_device->time_in_idle[cpu_idx];
-	delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu_idx];
+	delta_idle = now_idle - idle_time->time;
+	delta_time = now - idle_time->timestamp;
 
 	if (delta_time <= delta_idle)
 		load = 0;
 	else
 		load = div64_u64(100 * (delta_time - delta_idle), delta_time);
 
-	cpufreq_device->time_in_idle[cpu_idx] = now_idle;
-	cpufreq_device->time_in_idle_timestamp[cpu_idx] = now;
+	idle_time->time = now_idle;
+	idle_time->timestamp = now;
 
 	return load;
 }
 
 /**
  * get_static_power() - calculate the static power consumed by the cpus
- * @cpufreq_device: struct &cpufreq_cooling_device for this cpu cdev
+ * @cpufreq_cdev: struct &cpufreq_cooling_device for this cpu cdev
  * @tz: thermal zone device in which we're operating
 * @freq: frequency in KHz
 * @power: pointer in which to store the calculated static power
@@ -376,26 +333,28 @@ static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu,
 *
 * Return: 0 on success, -E* on failure.
 */
-static int get_static_power(struct cpufreq_cooling_device *cpufreq_device,
+static int get_static_power(struct cpufreq_cooling_device *cpufreq_cdev,
 			    struct thermal_zone_device *tz, unsigned long freq,
 			    u32 *power)
 {
 	struct dev_pm_opp *opp;
 	unsigned long voltage;
-	struct cpumask *cpumask = &cpufreq_device->allowed_cpus;
+	struct cpufreq_policy *policy = cpufreq_cdev->policy;
+	struct cpumask *cpumask = policy->related_cpus;
 	unsigned long freq_hz = freq * 1000;
+	struct device *dev;
 
-	if (!cpufreq_device->plat_get_static_power ||
-	    !cpufreq_device->cpu_dev) {
+	if (!cpufreq_cdev->plat_get_static_power) {
 		*power = 0;
 		return 0;
 	}
 
-	opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz,
-					 true);
+	dev = get_cpu_device(policy->cpu);
+	WARN_ON(!dev);
+
+	opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
 	if (IS_ERR(opp)) {
-		dev_warn_ratelimited(cpufreq_device->cpu_dev,
-				     "Failed to find OPP for frequency %lu: %ld\n",
+		dev_warn_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n",
 				     freq_hz, PTR_ERR(opp));
 		return -EINVAL;
 	}
@@ -404,31 +363,30 @@ static int get_static_power(struct cpufreq_cooling_device *cpufreq_device,
 	dev_pm_opp_put(opp);
 
 	if (voltage == 0) {
-		dev_err_ratelimited(cpufreq_device->cpu_dev,
-				    "Failed to get voltage for frequency %lu\n",
+		dev_err_ratelimited(dev, "Failed to get voltage for frequency %lu\n",
 				    freq_hz);
 		return -EINVAL;
 	}
 
-	return cpufreq_device->plat_get_static_power(cpumask, tz->passive_delay,
-						     voltage, power);
+	return cpufreq_cdev->plat_get_static_power(cpumask, tz->passive_delay,
+						   voltage, power);
 }
 
 /**
  * get_dynamic_power() - calculate the dynamic power
- * @cpufreq_device: &cpufreq_cooling_device for this cdev
+ * @cpufreq_cdev: &cpufreq_cooling_device for this cdev
 * @freq: current frequency
 *
 * Return: the dynamic power consumed by the cpus described by
- * @cpufreq_device.
+ * @cpufreq_cdev.
 */
-static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_device,
+static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev,
 			     unsigned long freq)
 {
 	u32 raw_cpu_power;
 
-	raw_cpu_power = cpu_freq_to_power(cpufreq_device, freq);
-	return (raw_cpu_power * cpufreq_device->last_load) / 100;
+	raw_cpu_power = cpu_freq_to_power(cpufreq_cdev, freq);
+	return (raw_cpu_power * cpufreq_cdev->last_load) / 100;
 }
 
 /* cpufreq cooling device callback functions are defined below */
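
Aside on the numbers used above: update_freq_table() fills freq_table[].power with the usual C * f * V^2 dynamic-power model, scaled so that a coefficient in the platform's dynamic-power-coefficient units, a frequency in MHz and a voltage in mV give milliwatts, and cpu_freq_to_power()/cpu_power_to_freq() then walk that descending table. Below is a minimal, self-contained user-space sketch of both steps; the three OPPs and the coefficient of 100 are invented for illustration and are not taken from any real platform.

#include <stdint.h>
#include <stdio.h>

struct freq_table { uint32_t frequency; uint32_t power; };	/* KHz, mW */

/* power_mW = C * f_MHz * (V_mV)^2 / 10^9, as in update_freq_table() */
static uint32_t opp_power_mw(uint32_t capacitance, uint32_t freq_mhz,
			     uint32_t volt_mv)
{
	uint64_t power = (uint64_t)capacitance * freq_mhz * volt_mv * volt_mv;

	return (uint32_t)(power / 1000000000ULL);
}

/* Same walk as cpu_freq_to_power(): table sorted by descending frequency */
static uint32_t freq_to_power(const struct freq_table *tbl, int max_level,
			      uint32_t freq_khz)
{
	int i;

	for (i = 1; i <= max_level; i++)
		if (freq_khz > tbl[i].frequency)
			break;

	return tbl[i - 1].power;
}

int main(void)
{
	/* Illustrative 3-OPP table: 1.2 GHz/1100 mV, 900 MHz/1000 mV, 600 MHz/900 mV */
	struct freq_table tbl[] = {
		{ 1200000, opp_power_mw(100, 1200, 1100) },
		{  900000, opp_power_mw(100,  900, 1000) },
		{  600000, opp_power_mw(100,  600,  900) },
	};

	printf("power at 900 MHz: %u mW\n", freq_to_power(tbl, 2, 900000));
	return 0;
}

Compiled standalone, the example prints the power of the 900 MHz entry, which is the entry the lookup selects for any requested frequency above 600 MHz up to and including 900 MHz.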
@@ -446,9 +404,9 @@ static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_device,
 static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
 				 unsigned long *state)
 {
-	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
 
-	*state = cpufreq_device->max_level;
+	*state = cpufreq_cdev->max_level;
 	return 0;
 }
 
@@ -465,9 +423,9 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
 static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
 				 unsigned long *state)
 {
-	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
 
-	*state = cpufreq_device->cpufreq_state;
+	*state = cpufreq_cdev->cpufreq_state;
 
 	return 0;
 }
@@ -485,23 +443,22 @@ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
 static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
 				 unsigned long state)
 {
-	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
-	unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
+	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
 	unsigned int clip_freq;
 
 	/* Request state should be less than max_level */
-	if (WARN_ON(state > cpufreq_device->max_level))
+	if (WARN_ON(state > cpufreq_cdev->max_level))
 		return -EINVAL;
 
 	/* Check if the old cooling action is same as new cooling action */
-	if (cpufreq_device->cpufreq_state == state)
+	if (cpufreq_cdev->cpufreq_state == state)
 		return 0;
 
-	clip_freq = cpufreq_device->freq_table[state];
-	cpufreq_device->cpufreq_state = state;
-	cpufreq_device->clipped_freq = clip_freq;
+	clip_freq = cpufreq_cdev->freq_table[state].frequency;
+	cpufreq_cdev->cpufreq_state = state;
+	cpufreq_cdev->clipped_freq = clip_freq;
 
-	cpufreq_update_policy(cpu);
+	cpufreq_update_policy(cpufreq_cdev->policy->cpu);
 
 	return 0;
 }
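
One consequence of the descending freq_table[] worth keeping in mind when reading cpufreq_set_cur_state() and get_level(): cooling state 0 always selects the highest (unthrottled) frequency and max_level the lowest one, and a requested frequency that falls between two entries is mapped to the deepest state whose frequency is still at or above it. A small self-contained sketch of that mapping, using an invented three-entry table:

#include <stdio.h>

/* Invented table, descending as __cpufreq_cooling_register() builds it */
static const unsigned int freqs_khz[] = { 1200000, 900000, 600000 };
#define MAX_LEVEL 2	/* one less than the number of entries */

/* Mirrors get_level(): deepest state whose frequency is still >= freq */
static unsigned long freq_to_state(unsigned int freq)
{
	unsigned long level;

	for (level = 1; level <= MAX_LEVEL; level++)
		if (freq > freqs_khz[level])
			break;

	return level - 1;
}

int main(void)
{
	/* Both requests land on state 1 (900 MHz): 750 MHz is rounded up */
	printf("%lu %lu\n", freq_to_state(900000), freq_to_state(750000));
	return 0;
}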
@@ -536,33 +493,23 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
 	unsigned long freq;
 	int i = 0, cpu, ret;
 	u32 static_power, dynamic_power, total_load = 0;
-	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
+	struct cpufreq_policy *policy = cpufreq_cdev->policy;
 	u32 *load_cpu = NULL;
 
-	cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask);
-
-	/*
-	 * All the CPUs are offline, thus the requested power by
-	 * the cdev is 0
-	 */
-	if (cpu >= nr_cpu_ids) {
-		*power = 0;
-		return 0;
-	}
-
-	freq = cpufreq_quick_get(cpu);
+	freq = cpufreq_quick_get(policy->cpu);
 
 	if (trace_thermal_power_cpu_get_power_enabled()) {
-		u32 ncpus = cpumask_weight(&cpufreq_device->allowed_cpus);
+		u32 ncpus = cpumask_weight(policy->related_cpus);
 
 		load_cpu = kcalloc(ncpus, sizeof(*load_cpu), GFP_KERNEL);
 	}
 
-	for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
+	for_each_cpu(cpu, policy->related_cpus) {
 		u32 load;
 
 		if (cpu_online(cpu))
-			load = get_load(cpufreq_device, cpu, i);
+			load = get_load(cpufreq_cdev, cpu, i);
 		else
 			load = 0;
 
@@ -573,19 +520,19 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
 		i++;
 	}
 
-	cpufreq_device->last_load = total_load;
+	cpufreq_cdev->last_load = total_load;
 
-	dynamic_power = get_dynamic_power(cpufreq_device, freq);
-	ret = get_static_power(cpufreq_device, tz, freq, &static_power);
+	dynamic_power = get_dynamic_power(cpufreq_cdev, freq);
+	ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
 	if (ret) {
 		kfree(load_cpu);
 		return ret;
 	}
 
 	if (load_cpu) {
-		trace_thermal_power_cpu_get_power(
-			&cpufreq_device->allowed_cpus,
-			freq, load_cpu, i, dynamic_power, static_power);
+		trace_thermal_power_cpu_get_power(policy->related_cpus, freq,
+						  load_cpu, i, dynamic_power,
+						  static_power);
 
 		kfree(load_cpu);
 	}
@@ -614,38 +561,23 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
 			       unsigned long state, u32 *power)
 {
 	unsigned int freq, num_cpus;
-	cpumask_var_t cpumask;
 	u32 static_power, dynamic_power;
 	int ret;
-	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
-
-	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
-		return -ENOMEM;
-
-	cpumask_and(cpumask, &cpufreq_device->allowed_cpus, cpu_online_mask);
-	num_cpus = cpumask_weight(cpumask);
+	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
 
-	/* None of our cpus are online, so no power */
-	if (num_cpus == 0) {
-		*power = 0;
-		ret = 0;
-		goto out;
-	}
+	/* Request state should be less than max_level */
+	if (WARN_ON(state > cpufreq_cdev->max_level))
+		return -EINVAL;
 
-	freq = cpufreq_device->freq_table[state];
-	if (!freq) {
-		ret = -EINVAL;
-		goto out;
-	}
+	num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
 
-	dynamic_power = cpu_freq_to_power(cpufreq_device, freq) * num_cpus;
-	ret = get_static_power(cpufreq_device, tz, freq, &static_power);
+	freq = cpufreq_cdev->freq_table[state].frequency;
+	dynamic_power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
+	ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
 	if (ret)
-		goto out;
+		return ret;
 
 	*power = static_power + dynamic_power;
-out:
-	free_cpumask_var(cpumask);
 	return ret;
 }
 
@@ -673,39 +605,27 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
 			       struct thermal_zone_device *tz, u32 power,
 			       unsigned long *state)
 {
-	unsigned int cpu, cur_freq, target_freq;
+	unsigned int cur_freq, target_freq;
 	int ret;
 	s32 dyn_power;
 	u32 last_load, normalised_power, static_power;
-	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
+	struct cpufreq_policy *policy = cpufreq_cdev->policy;
 
-	cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask);
-
-	/* None of our cpus are online */
-	if (cpu >= nr_cpu_ids)
-		return -ENODEV;
-
-	cur_freq = cpufreq_quick_get(cpu);
-	ret = get_static_power(cpufreq_device, tz, cur_freq, &static_power);
+	cur_freq = cpufreq_quick_get(policy->cpu);
+	ret = get_static_power(cpufreq_cdev, tz, cur_freq, &static_power);
 	if (ret)
 		return ret;
 
 	dyn_power = power - static_power;
 	dyn_power = dyn_power > 0 ? dyn_power : 0;
-	last_load = cpufreq_device->last_load ?: 1;
+	last_load = cpufreq_cdev->last_load ?: 1;
 	normalised_power = (dyn_power * 100) / last_load;
-	target_freq = cpu_power_to_freq(cpufreq_device, normalised_power);
+	target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);
 
-	*state = cpufreq_cooling_get_level(cpu, target_freq);
-	if (*state == THERMAL_CSTATE_INVALID) {
-		dev_err_ratelimited(&cdev->device,
-				    "Failed to convert %dKHz for cpu %d into a cdev state\n",
-				    target_freq, cpu);
-		return -EINVAL;
-	}
-
-	trace_thermal_power_cpu_limit(&cpufreq_device->allowed_cpus,
-				      target_freq, *state, power);
+	*state = get_level(cpufreq_cdev, target_freq);
+	trace_thermal_power_cpu_limit(policy->related_cpus, target_freq, *state,
+				      power);
 	return 0;
 }
 
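
To make the arithmetic in cpufreq_power2state() above concrete (all numbers invented for illustration): given a 2000 mW budget, 200 mW of estimated static power and last_load = 50 (the CPUs were about half busy on the previous sample), dyn_power = 1800 mW and normalised_power = 1800 * 100 / 50 = 3600 mW. That is the power the domain would draw at full load, so cpu_power_to_freq() returns the highest table frequency whose full-load power stays within 3600 mW, and get_level() turns that frequency into the cooling state handed back to the governor.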
@@ -748,7 +668,7 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
 /**
  * __cpufreq_cooling_register - helper function to create cpufreq cooling device
  * @np: a valid struct device_node to the cooling device device tree node
- * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ * @policy: cpufreq policy
  * Normally this should be same as cpufreq policy->related_cpus.
 * @capacitance: dynamic power coefficient for these cpus
 * @plat_static_func: function to calculate the static power consumed by these
@@ -764,102 +684,68 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
  */
 static struct thermal_cooling_device *
 __cpufreq_cooling_register(struct device_node *np,
-			const struct cpumask *clip_cpus, u32 capacitance,
+			struct cpufreq_policy *policy, u32 capacitance,
 			get_static_t plat_static_func)
 {
-	struct cpufreq_policy *policy;
-	struct thermal_cooling_device *cool_dev;
-	struct cpufreq_cooling_device *cpufreq_dev;
+	struct thermal_cooling_device *cdev;
+	struct cpufreq_cooling_device *cpufreq_cdev;
 	char dev_name[THERMAL_NAME_LENGTH];
-	struct cpufreq_frequency_table *pos, *table;
-	cpumask_var_t temp_mask;
 	unsigned int freq, i, num_cpus;
 	int ret;
 	struct thermal_cooling_device_ops *cooling_ops;
 	bool first;
 
-	if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL))
-		return ERR_PTR(-ENOMEM);
-
-	cpumask_and(temp_mask, clip_cpus, cpu_online_mask);
-	policy = cpufreq_cpu_get(cpumask_first(temp_mask));
-	if (!policy) {
-		pr_debug("%s: CPUFreq policy not found\n", __func__);
-		cool_dev = ERR_PTR(-EPROBE_DEFER);
-		goto free_cpumask;
+	if (IS_ERR_OR_NULL(policy)) {
+		pr_err("%s: cpufreq policy isn't valid: %p", __func__, policy);
+		return ERR_PTR(-EINVAL);
 	}
 
-	table = policy->freq_table;
-	if (!table) {
-		pr_debug("%s: CPUFreq table not found\n", __func__);
-		cool_dev = ERR_PTR(-ENODEV);
-		goto put_policy;
+	i = cpufreq_table_count_valid_entries(policy);
+	if (!i) {
+		pr_debug("%s: CPUFreq table not found or has no valid entries\n",
+			 __func__);
+		return ERR_PTR(-ENODEV);
 	}
 
-	cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
-	if (!cpufreq_dev) {
-		cool_dev = ERR_PTR(-ENOMEM);
-		goto put_policy;
-	}
+	cpufreq_cdev = kzalloc(sizeof(*cpufreq_cdev), GFP_KERNEL);
+	if (!cpufreq_cdev)
+		return ERR_PTR(-ENOMEM);
 
-	num_cpus = cpumask_weight(clip_cpus);
-	cpufreq_dev->time_in_idle = kcalloc(num_cpus,
-					    sizeof(*cpufreq_dev->time_in_idle),
-					    GFP_KERNEL);
-	if (!cpufreq_dev->time_in_idle) {
-		cool_dev = ERR_PTR(-ENOMEM);
+	cpufreq_cdev->policy = policy;
+	num_cpus = cpumask_weight(policy->related_cpus);
+	cpufreq_cdev->idle_time = kcalloc(num_cpus,
+					 sizeof(*cpufreq_cdev->idle_time),
+					 GFP_KERNEL);
+	if (!cpufreq_cdev->idle_time) {
+		cdev = ERR_PTR(-ENOMEM);
 		goto free_cdev;
 	}
 
-	cpufreq_dev->time_in_idle_timestamp =
-		kcalloc(num_cpus, sizeof(*cpufreq_dev->time_in_idle_timestamp),
-			GFP_KERNEL);
-	if (!cpufreq_dev->time_in_idle_timestamp) {
-		cool_dev = ERR_PTR(-ENOMEM);
-		goto free_time_in_idle;
-	}
-
-	/* Find max levels */
-	cpufreq_for_each_valid_entry(pos, table)
-		cpufreq_dev->max_level++;
-
-	cpufreq_dev->freq_table = kmalloc(sizeof(*cpufreq_dev->freq_table) *
-					  cpufreq_dev->max_level, GFP_KERNEL);
-	if (!cpufreq_dev->freq_table) {
-		cool_dev = ERR_PTR(-ENOMEM);
-		goto free_time_in_idle_timestamp;
-	}
-
 	/* max_level is an index, not a counter */
-	cpufreq_dev->max_level--;
-
-	cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
-
-	if (capacitance) {
-		cpufreq_dev->plat_get_static_power = plat_static_func;
-
-		ret = build_dyn_power_table(cpufreq_dev, capacitance);
-		if (ret) {
-			cool_dev = ERR_PTR(ret);
-			goto free_table;
-		}
-
-		cooling_ops = &cpufreq_power_cooling_ops;
-	} else {
-		cooling_ops = &cpufreq_cooling_ops;
+	cpufreq_cdev->max_level = i - 1;
+
+	cpufreq_cdev->freq_table = kmalloc_array(i,
+					sizeof(*cpufreq_cdev->freq_table),
+					GFP_KERNEL);
+	if (!cpufreq_cdev->freq_table) {
+		cdev = ERR_PTR(-ENOMEM);
+		goto free_idle_time;
 	}
 
 	ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL);
 	if (ret < 0) {
-		cool_dev = ERR_PTR(ret);
-		goto free_power_table;
+		cdev = ERR_PTR(ret);
+		goto free_table;
 	}
-	cpufreq_dev->id = ret;
+	cpufreq_cdev->id = ret;
+
+	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
+		 cpufreq_cdev->id);
 
 	/* Fill freq-table in descending order of frequencies */
-	for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
-		freq = find_next_max(table, freq);
-		cpufreq_dev->freq_table[i] = freq;
+	for (i = 0, freq = -1; i <= cpufreq_cdev->max_level; i++) {
+		freq = find_next_max(policy->freq_table, freq);
+		cpufreq_cdev->freq_table[i].frequency = freq;
 
 		/* Warn for duplicate entries */
 		if (!freq)
@@ -868,51 +754,54 @@ __cpufreq_cooling_register(struct device_node *np,
 			pr_debug("%s: freq:%u KHz\n", __func__, freq);
 	}
 
-	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
-		 cpufreq_dev->id);
+	if (capacitance) {
+		cpufreq_cdev->plat_get_static_power = plat_static_func;
+
+		ret = update_freq_table(cpufreq_cdev, capacitance);
+		if (ret) {
+			cdev = ERR_PTR(ret);
+			goto remove_ida;
+		}
+
+		cooling_ops = &cpufreq_power_cooling_ops;
+	} else {
+		cooling_ops = &cpufreq_cooling_ops;
+	}
 
-	cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
-						      cooling_ops);
-	if (IS_ERR(cool_dev))
+	cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
+						  cooling_ops);
+	if (IS_ERR(cdev))
 		goto remove_ida;
 
-	cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
-	cpufreq_dev->cool_dev = cool_dev;
+	cpufreq_cdev->clipped_freq = cpufreq_cdev->freq_table[0].frequency;
+	cpufreq_cdev->cdev = cdev;
 
 	mutex_lock(&cooling_list_lock);
 	/* Register the notifier for first cpufreq cooling device */
-	first = list_empty(&cpufreq_dev_list);
-	list_add(&cpufreq_dev->node, &cpufreq_dev_list);
+	first = list_empty(&cpufreq_cdev_list);
+	list_add(&cpufreq_cdev->node, &cpufreq_cdev_list);
 	mutex_unlock(&cooling_list_lock);
 
 	if (first)
 		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
 					  CPUFREQ_POLICY_NOTIFIER);
 
-	goto put_policy;
+	return cdev;
 
 remove_ida:
-	ida_simple_remove(&cpufreq_ida, cpufreq_dev->id);
-free_power_table:
-	kfree(cpufreq_dev->dyn_power_table);
+	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
 free_table:
-	kfree(cpufreq_dev->freq_table);
-free_time_in_idle_timestamp:
-	kfree(cpufreq_dev->time_in_idle_timestamp);
-free_time_in_idle:
-	kfree(cpufreq_dev->time_in_idle);
+	kfree(cpufreq_cdev->freq_table);
+free_idle_time:
+	kfree(cpufreq_cdev->idle_time);
 free_cdev:
-	kfree(cpufreq_dev);
-put_policy:
-	cpufreq_cpu_put(policy);
-free_cpumask:
-	free_cpumask_var(temp_mask);
-	return cool_dev;
+	kfree(cpufreq_cdev);
+	return cdev;
 }
 
 /**
  * cpufreq_cooling_register - function to create cpufreq cooling device.
- * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -922,16 +811,16 @@ free_cpumask:
 * on failure, it returns a corresponding ERR_PTR().
 */
 struct thermal_cooling_device *
-cpufreq_cooling_register(const struct cpumask *clip_cpus)
+cpufreq_cooling_register(struct cpufreq_policy *policy)
 {
-	return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL);
+	return __cpufreq_cooling_register(NULL, policy, 0, NULL);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
 
 /**
  * of_cpufreq_cooling_register - function to create cpufreq cooling device.
  * @np: a valid struct device_node to the cooling device device tree node
- * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -943,18 +832,18 @@ EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
 */
 struct thermal_cooling_device *
 of_cpufreq_cooling_register(struct device_node *np,
-			    const struct cpumask *clip_cpus)
+			    struct cpufreq_policy *policy)
 {
 	if (!np)
 		return ERR_PTR(-EINVAL);
 
-	return __cpufreq_cooling_register(np, clip_cpus, 0, NULL);
+	return __cpufreq_cooling_register(np, policy, 0, NULL);
 }
 EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
 
 /**
  * cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
- * @clip_cpus:	cpumask of cpus where the frequency constraints will happen
+ * @policy:	cpufreq policy
 * @capacitance:	dynamic power coefficient for these cpus
 * @plat_static_func:	function to calculate the static power consumed by these
 *			cpus (optional)
@@ -974,10 +863,10 @@ EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
 * on failure, it returns a corresponding ERR_PTR().
 */
 struct thermal_cooling_device *
-cpufreq_power_cooling_register(const struct cpumask *clip_cpus, u32 capacitance,
+cpufreq_power_cooling_register(struct cpufreq_policy *policy, u32 capacitance,
 			       get_static_t plat_static_func)
 {
-	return __cpufreq_cooling_register(NULL, clip_cpus, capacitance,
+	return __cpufreq_cooling_register(NULL, policy, capacitance,
 					  plat_static_func);
 }
 EXPORT_SYMBOL(cpufreq_power_cooling_register);
@@ -985,7 +874,7 @@ EXPORT_SYMBOL(cpufreq_power_cooling_register);
 /**
  * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
  * @np:	a valid struct device_node to the cooling device device tree node
- * @clip_cpus:	cpumask of cpus where the frequency constraints will happen
+ * @policy: cpufreq policy
 * @capacitance:	dynamic power coefficient for these cpus
 * @plat_static_func:	function to calculate the static power consumed by these
 *			cpus (optional)
@@ -1007,14 +896,14 @@ EXPORT_SYMBOL(cpufreq_power_cooling_register);
 */
 struct thermal_cooling_device *
 of_cpufreq_power_cooling_register(struct device_node *np,
-				  const struct cpumask *clip_cpus,
+				  struct cpufreq_policy *policy,
 				  u32 capacitance,
 				  get_static_t plat_static_func)
 {
 	if (!np)
 		return ERR_PTR(-EINVAL);
 
-	return __cpufreq_cooling_register(np, clip_cpus, capacitance,
+	return __cpufreq_cooling_register(np, policy, capacitance,
 					  plat_static_func);
 }
 EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
@@ -1027,30 +916,28 @@ EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
 */
 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 {
-	struct cpufreq_cooling_device *cpufreq_dev;
+	struct cpufreq_cooling_device *cpufreq_cdev;
 	bool last;
 
 	if (!cdev)
 		return;
 
-	cpufreq_dev = cdev->devdata;
+	cpufreq_cdev = cdev->devdata;
 
 	mutex_lock(&cooling_list_lock);
-	list_del(&cpufreq_dev->node);
+	list_del(&cpufreq_cdev->node);
 	/* Unregister the notifier for the last cpufreq cooling device */
-	last = list_empty(&cpufreq_dev_list);
+	last = list_empty(&cpufreq_cdev_list);
 	mutex_unlock(&cooling_list_lock);
 
 	if (last)
 		cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
 					    CPUFREQ_POLICY_NOTIFIER);
 
-	thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
-	ida_simple_remove(&cpufreq_ida, cpufreq_dev->id);
-	kfree(cpufreq_dev->dyn_power_table);
-	kfree(cpufreq_dev->time_in_idle_timestamp);
-	kfree(cpufreq_dev->time_in_idle);
-	kfree(cpufreq_dev->freq_table);
-	kfree(cpufreq_dev);
+	thermal_cooling_device_unregister(cpufreq_cdev->cdev);
+	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
+	kfree(cpufreq_cdev->idle_time);
+	kfree(cpufreq_cdev->freq_table);
+	kfree(cpufreq_cdev);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
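
With the signatures above, registration naturally moves into the cpufreq drivers, which own the policy. A hedged sketch of a possible caller follows; the ->ready() placement, the device-tree lookup and the error handling are illustrative only and are not part of this patch:

#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/err.h>
#include <linux/of.h>

/*
 * Illustrative only: a cpufreq driver registering its cooling device once
 * the policy is fully set up, e.g. from the driver's ->ready() callback.
 * A real driver would keep the returned pointer and call
 * cpufreq_cooling_unregister() on exit.
 */
static void example_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
	struct thermal_cooling_device *cdev;

	if (!np)
		return;

	cdev = of_cpufreq_cooling_register(np, policy);
	if (IS_ERR(cdev))
		pr_warn("cpufreq cooling registration failed: %ld\n",
			PTR_ERR(cdev));

	of_node_put(np);
}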