@@ -1109,6 +1109,21 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
                 goto err_set_policy_cpu;
         }
 
+        /* related cpus should atleast have policy->cpus */
+        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+        /*
+         * affected cpus must always be the one, which are online. We aren't
+         * managing offline cpus here.
+         */
+        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
+        if (!frozen) {
+                policy->user_policy.min = policy->min;
+                policy->user_policy.max = policy->max;
+        }
+
+        down_write(&policy->rwsem);
         write_lock_irqsave(&cpufreq_driver_lock, flags);
         for_each_cpu(j, policy->cpus)
                 per_cpu(cpufreq_cpu_data, j) = policy;
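[Annotation: the block added above is not new code; the next hunk removes the
same lines from their old location further down in __cpufreq_add_dev(). Moving
them up means the cpumask fixups and the user_policy min/max defaults are in
place before the policy is published through cpufreq_cpu_data, and the
down_write(&policy->rwsem) taken just before that publication keeps readers
from seeing a half-initialized policy. The two cpumask calls enforce
"related_cpus must cover at least policy->cpus" and "policy->cpus must contain
only online CPUs". A minimal userspace sketch of that mask arithmetic, using a
plain unsigned long in place of struct cpumask -- illustrative only, not the
kernel API:

#include <stdio.h>

int main(void)
{
        unsigned long cpus = 0x0c;      /* policy covers CPUs 2-3 */
        unsigned long related = 0x04;   /* previously recorded relation */
        unsigned long online = 0x07;    /* CPUs 0-2 are online */

        related |= cpus;        /* cpumask_or(): related >= cpus */
        cpus &= online;         /* cpumask_and(): keep online CPUs only */

        /* prints "related=0xc cpus=0x4" */
        printf("related=%#lx cpus=%#lx\n", related, cpus);
        return 0;
}
]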
@@ -1162,20 +1177,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
                 }
         }
 
-        /* related cpus should atleast have policy->cpus */
-        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
-
-        /*
-         * affected cpus must always be the one, which are online. We aren't
-         * managing offline cpus here.
-         */
-        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
-
-        if (!frozen) {
-                policy->user_policy.min = policy->min;
-                policy->user_policy.max = policy->max;
-        }
-
         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                         CPUFREQ_START, policy);
 
@@ -1206,6 +1207,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
                 policy->user_policy.policy = policy->policy;
                 policy->user_policy.governor = policy->governor;
         }
+        up_write(&policy->rwsem);
 
         kobject_uevent(&policy->kobj, KOBJ_ADD);
         up_read(&cpufreq_rwsem);
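[Annotation: the up_write() above closes the critical section opened by the
down_write() added in the first hunk, so policy->rwsem is write-held from just
before the policy is stored in cpufreq_cpu_data until the governor and
user_policy defaults are fully set up. A minimal pthreads sketch of the same
publish-then-finish-init-under-write-lock pattern -- a userspace analogue, not
the kernel locking API:

#include <pthread.h>
#include <stdio.h>

struct policy {
        pthread_rwlock_t rwsem;
        int min, max;           /* stand-ins for the real fields */
};

static struct policy *visible;  /* readers look this up, like cpufreq_cpu_data */

int main(void)
{
        static struct policy p = { .rwsem = PTHREAD_RWLOCK_INITIALIZER };

        pthread_rwlock_wrlock(&p.rwsem);        /* like down_write() */
        visible = &p;           /* policy is now findable by readers... */
        p.min = 400000;         /* ...but they block on the rwsem */
        p.max = 2000000;        /* until initialization has finished */
        pthread_rwlock_unlock(&p.rwsem);        /* like up_write() */

        printf("published: min=%d max=%d\n", visible->min, visible->max);
        return 0;
}
]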
@@ -1546,23 +1548,16 @@ static unsigned int __cpufreq_get(unsigned int cpu)
  */
 unsigned int cpufreq_get(unsigned int cpu)
 {
-        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
         unsigned int ret_freq = 0;
 
-        if (cpufreq_disabled() || !cpufreq_driver)
-                return -ENOENT;
-
-        BUG_ON(!policy);
-
-        if (!down_read_trylock(&cpufreq_rwsem))
-                return 0;
-
-        down_read(&policy->rwsem);
-
-        ret_freq = __cpufreq_get(cpu);
+        if (policy) {
+                down_read(&policy->rwsem);
+                ret_freq = __cpufreq_get(cpu);
+                up_read(&policy->rwsem);
 
-        up_read(&policy->rwsem);
-        up_read(&cpufreq_rwsem);
+                cpufreq_cpu_put(policy);
+        }
 
         return ret_freq;
 }
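[Annotation: the rewrite above replaces the bare per_cpu() lookup and the
BUG_ON() with cpufreq_cpu_get()/cpufreq_cpu_put(), which take and drop a
reference on the policy, so the policy can no longer be freed underneath
cpufreq_get() by a concurrent teardown; a missing policy now simply returns 0
instead of crashing. The dropped cpufreq_disabled()/cpufreq_rwsem checks are
not lost either: cpufreq_cpu_get() performs them internally and returns NULL
on failure. A minimal sketch of the calling convention users of
cpufreq_cpu_get() follow -- kernel context assumed, error handling trimmed:

        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int freq = 0;

        if (policy) {                   /* NULL if no driver or no policy */
                freq = policy->cur;     /* safe: we hold a reference */
                cpufreq_cpu_put(policy);        /* always drop the reference */
        }
]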