@@ -1002,7 +1002,7 @@ static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
 	int ret = 0;
 
 	/* Some related CPUs might not be present (physically hotplugged) */
-	for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+	for_each_cpu(j, policy->real_cpus) {
 		if (j == policy->kobj_cpu)
 			continue;
 
@@ -1019,7 +1019,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
 	unsigned int j;
 
 	/* Some related CPUs might not be present (physically hotplugged) */
-	for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+	for_each_cpu(j, policy->real_cpus) {
 		if (j == policy->kobj_cpu)
 			continue;
 
@@ -1163,11 +1163,14 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
 	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
 		goto err_free_cpumask;
 
+	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
+		goto err_free_rcpumask;
+
 	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
 				   "cpufreq");
 	if (ret) {
 		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
-		goto err_free_rcpumask;
+		goto err_free_real_cpus;
 	}
 
 	INIT_LIST_HEAD(&policy->policy_list);
@@ -1184,6 +1187,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
 
 	return policy;
 
+err_free_real_cpus:
+	free_cpumask_var(policy->real_cpus);
 err_free_rcpumask:
 	free_cpumask_var(policy->related_cpus);
 err_free_cpumask:
@@ -1234,6 +1239,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	cpufreq_policy_put_kobj(policy, notify);
+	free_cpumask_var(policy->real_cpus);
 	free_cpumask_var(policy->related_cpus);
 	free_cpumask_var(policy->cpus);
 	kfree(policy);
@@ -1258,14 +1264,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 
 	pr_debug("adding CPU %u\n", cpu);
 
-	/*
-	 * Only possible if 'cpu' wasn't physically present earlier and we are
-	 * here from subsys_interface add callback. A hotplug notifier will
-	 * follow and we will handle it like logical CPU hotplug then. For now,
-	 * just create the sysfs link.
-	 */
-	if (cpu_is_offline(cpu))
-		return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);
+	if (cpu_is_offline(cpu)) {
+		/*
+		 * Only possible if we are here from the subsys_interface add
+		 * callback. A hotplug notifier will follow and we will handle
+		 * it as CPU online then. For now, just create the sysfs link,
+		 * unless there is no policy or the link is already present.
+		 */
+		policy = per_cpu(cpufreq_cpu_data, cpu);
+		return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
+			? add_cpu_dev_symlink(policy, cpu) : 0;
+	}
 
 	if (!down_read_trylock(&cpufreq_rwsem))
 		return 0;
@@ -1307,6 +1316,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 	/* related cpus should atleast have policy->cpus */
 	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
 
+	/* Remember which CPUs have been present at the policy creation time. */
+	if (!recover_policy)
+		cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
+
 	/*
 	 * affected cpus must always be the one, which are online. We aren't
 	 * managing offline cpus here.
@@ -1420,8 +1433,7 @@ nomem_out:
 	return ret;
 }
 
-static int __cpufreq_remove_dev_prepare(struct device *dev,
-					struct subsys_interface *sif)
+static int __cpufreq_remove_dev_prepare(struct device *dev)
 {
 	unsigned int cpu = dev->id;
 	int ret = 0;
@@ -1437,10 +1449,8 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 
 	if (has_target()) {
 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-		if (ret) {
+		if (ret)
 			pr_err("%s: Failed to stop governor\n", __func__);
-			return ret;
-		}
 	}
 
 	down_write(&policy->rwsem);
@@ -1473,8 +1483,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 	return ret;
 }
 
-static int __cpufreq_remove_dev_finish(struct device *dev,
-				       struct subsys_interface *sif)
+static int __cpufreq_remove_dev_finish(struct device *dev)
 {
 	unsigned int cpu = dev->id;
 	int ret;
@@ -1492,10 +1501,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 	/* If cpu is last user of policy, free policy */
 	if (has_target()) {
 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
-		if (ret) {
+		if (ret)
 			pr_err("%s: Failed to exit governor\n", __func__);
-			return ret;
-		}
 	}
 
 	/*
@@ -1506,10 +1513,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 
-	/* Free the policy only if the driver is getting removed. */
-	if (sif)
-		cpufreq_policy_free(policy, true);
-
 	return 0;
 }
 
@@ -1521,42 +1524,41 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id;
-	int ret;
-
-	/*
-	 * Only possible if 'cpu' is getting physically removed now. A hotplug
-	 * notifier should have already been called and we just need to remove
-	 * link or free policy here.
-	 */
-	if (cpu_is_offline(cpu)) {
-		struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
-		struct cpumask mask;
+	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
-		if (!policy)
-			return 0;
+	if (!policy)
+		return 0;
 
-		cpumask_copy(&mask, policy->related_cpus);
-		cpumask_clear_cpu(cpu, &mask);
+	if (cpu_online(cpu)) {
+		__cpufreq_remove_dev_prepare(dev);
+		__cpufreq_remove_dev_finish(dev);
+	}
 
-		/*
-		 * Free policy only if all policy->related_cpus are removed
-		 * physically.
-		 */
-		if (cpumask_intersects(&mask, cpu_present_mask)) {
-			remove_cpu_dev_symlink(policy, cpu);
-			return 0;
-		}
+	cpumask_clear_cpu(cpu, policy->real_cpus);
 
+	if (cpumask_empty(policy->real_cpus)) {
 		cpufreq_policy_free(policy, true);
 		return 0;
 	}
 
-	ret = __cpufreq_remove_dev_prepare(dev, sif);
+	if (cpu != policy->kobj_cpu) {
+		remove_cpu_dev_symlink(policy, cpu);
+	} else {
+		/*
+		 * The CPU owning the policy object is going away. Move it to
+		 * another suitable CPU.
+		 */
+		unsigned int new_cpu = cpumask_first(policy->real_cpus);
+		struct device *new_dev = get_cpu_device(new_cpu);
+
+		dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
 
-	if (!ret)
-		ret = __cpufreq_remove_dev_finish(dev, sif);
+		sysfs_remove_link(&new_dev->kobj, "cpufreq");
+		policy->kobj_cpu = new_cpu;
+		WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
+	}
 
-	return ret;
+	return 0;
 }
 
 static void handle_update(struct work_struct *work)
@@ -2395,11 +2397,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 		break;
 
 	case CPU_DOWN_PREPARE:
-		__cpufreq_remove_dev_prepare(dev, NULL);
+		__cpufreq_remove_dev_prepare(dev);
 		break;
 
 	case CPU_POST_DEAD:
-		__cpufreq_remove_dev_finish(dev, NULL);
+		__cpufreq_remove_dev_finish(dev);
 		break;
 
 	case CPU_DOWN_FAILED:
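
Taken together, the hunks above replace the old "is any related CPU still physically present?" test with a per-policy real_cpus mask: a bit is set when a CPU gets its sysfs link (or owns the policy kobject) and cleared when that CPU's device goes away, so the policy is freed exactly when the mask becomes empty, and cpumask_test_and_set_cpu() keeps the subsys_interface add callback from creating a duplicate symlink. The userspace sketch below models only that bookkeeping rule; it is not kernel code, and every name in it (policy_model, model_add_dev, model_remove_dev) is invented for illustration.

/* Minimal userspace model of the policy->real_cpus lifetime rule. */
#include <stdbool.h>
#include <stdio.h>

struct policy_model {
	unsigned int real_cpus;	/* bit n set: CPU n has a sysfs presence */
};

/* Mirrors the semantics of cpumask_test_and_set_cpu(): set the bit,
 * return its previous value. */
static bool test_and_set_cpu(unsigned int cpu, unsigned int *mask)
{
	bool old = *mask & (1u << cpu);

	*mask |= 1u << cpu;
	return old;
}

/* Device-add path: create the link only on the 0 -> 1 bit transition,
 * standing in for add_cpu_dev_symlink(). */
static void model_add_dev(struct policy_model *p, unsigned int cpu)
{
	if (!test_and_set_cpu(cpu, &p->real_cpus))
		printf("CPU%u: creating sysfs link\n", cpu);
	else
		printf("CPU%u: link already present, skipping\n", cpu);
}

/* Device-remove path: clear the bit and free the policy only when the
 * mask empties, standing in for cpufreq_policy_free(). */
static void model_remove_dev(struct policy_model *p, unsigned int cpu)
{
	p->real_cpus &= ~(1u << cpu);
	if (!p->real_cpus)
		printf("CPU%u was the last user: freeing policy\n", cpu);
	else
		printf("CPU%u gone, policy stays (mask 0x%x)\n",
		       cpu, p->real_cpus);
}

int main(void)
{
	struct policy_model p = { 0 };

	model_add_dev(&p, 0);
	model_add_dev(&p, 1);
	model_add_dev(&p, 1);	/* duplicate add: no second link */
	model_remove_dev(&p, 0);
	model_remove_dev(&p, 1);	/* last user: policy freed */
	return 0;
}

The test-and-set idiom is what makes the add path idempotent: cpufreq_add_dev() can be reached both from CPU hotplug and from the subsys_interface add callback for the same CPU, and gating link creation on the bit transition ensures the second caller becomes a no-op instead of attempting a duplicate symlink.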