|
@@ -33,8 +33,6 @@ LIST_HEAD(opp_tables);
 /* Lock to allow exclusive modification to the device and opp lists */
 DEFINE_MUTEX(opp_table_lock);
 
-static void dev_pm_opp_get(struct dev_pm_opp *opp);
-
 static struct opp_device *_find_opp_dev(const struct device *dev,
 					struct opp_table *opp_table)
 {
@@ -281,6 +279,23 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);
 
+int _get_opp_count(struct opp_table *opp_table)
+{
+	struct dev_pm_opp *opp;
+	int count = 0;
+
+	mutex_lock(&opp_table->lock);
+
+	list_for_each_entry(opp, &opp_table->opp_list, node) {
+		if (opp->available)
+			count++;
+	}
+
+	mutex_unlock(&opp_table->lock);
+
+	return count;
+}
+
 /**
  * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
  * @dev:	device for which we do this operation
@@ -291,25 +306,17 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);
 int dev_pm_opp_get_opp_count(struct device *dev)
 {
 	struct opp_table *opp_table;
-	struct dev_pm_opp *temp_opp;
-	int count = 0;
+	int count;
 
 	opp_table = _find_opp_table(dev);
 	if (IS_ERR(opp_table)) {
 		count = PTR_ERR(opp_table);
 		dev_dbg(dev, "%s: OPP table not found (%d)\n",
 			__func__, count);
-		return count;
-	}
-
-	mutex_lock(&opp_table->lock);
-
-	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
-		if (temp_opp->available)
-			count++;
+		return 0;
 	}
 
-	mutex_unlock(&opp_table->lock);
-
+	count = _get_opp_count(opp_table);
 	dev_pm_opp_put_opp_table(opp_table);
 
 	return count;
@@ -892,7 +899,7 @@ static void _opp_kref_release(struct kref *kref)
 	dev_pm_opp_put_opp_table(opp_table);
 }
 
-static void dev_pm_opp_get(struct dev_pm_opp *opp)
+void dev_pm_opp_get(struct dev_pm_opp *opp)
 {
 	kref_get(&opp->kref);
 }
@@ -985,22 +992,11 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
 	return true;
 }
 
-/*
- * Returns:
- * 0: On success. And appropriate error message for duplicate OPPs.
- * -EBUSY: For OPP with same freq/volt and is available. The callers of
- *  _opp_add() must return 0 if they receive -EBUSY from it. This is to make
- *  sure we don't print error messages unnecessarily if different parts of
- *  kernel try to initialize the OPP table.
- * -EEXIST: For OPP with same freq but different volt or is unavailable. This
- *  should be considered an error by the callers of _opp_add().
- */
-int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
-	     struct opp_table *opp_table)
+static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
+			     struct opp_table *opp_table,
+			     struct list_head **head)
 {
 	struct dev_pm_opp *opp;
-	struct list_head *head;
-	int ret;
 
 	/*
 	 * Insert new OPP in order of increasing frequency and discard if
@@ -1010,17 +1006,14 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
 	 * loop, don't replace it with head otherwise it will become an infinite
 	 * loop.
 	 */
-	mutex_lock(&opp_table->lock);
-	head = &opp_table->opp_list;
-
 	list_for_each_entry(opp, &opp_table->opp_list, node) {
 		if (new_opp->rate > opp->rate) {
-			head = &opp->node;
+			*head = &opp->node;
 			continue;
 		}
 
 		if (new_opp->rate < opp->rate)
-			break;
+			return 0;
 
 		/* Duplicate OPPs */
 		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
@@ -1029,15 +1022,39 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
 			 new_opp->supplies[0].u_volt, new_opp->available);
 
 		/* Should we compare voltages for all regulators here ? */
-		ret = opp->available &&
-		      new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
-
-		mutex_unlock(&opp_table->lock);
-		return ret;
+		return opp->available &&
+		       new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
 	}
 
-	if (opp_table->get_pstate)
-		new_opp->pstate = opp_table->get_pstate(dev, new_opp->rate);
+	return 0;
+}
+
+/*
+ * Returns:
+ * 0: On success. And appropriate error message for duplicate OPPs.
+ * -EBUSY: For OPP with same freq/volt and is available. The callers of
+ *  _opp_add() must return 0 if they receive -EBUSY from it. This is to make
+ *  sure we don't print error messages unnecessarily if different parts of
+ *  kernel try to initialize the OPP table.
+ * -EEXIST: For OPP with same freq but different volt or is unavailable. This
+ *  should be considered an error by the callers of _opp_add().
+ */
+int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
+	     struct opp_table *opp_table, bool rate_not_available)
+{
+	struct list_head *head;
+	int ret;
+
+	mutex_lock(&opp_table->lock);
+	head = &opp_table->opp_list;
+
+	if (likely(!rate_not_available)) {
+		ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
+		if (ret) {
+			mutex_unlock(&opp_table->lock);
+			return ret;
+		}
+	}
 
 	list_add(&new_opp->node, head);
 	mutex_unlock(&opp_table->lock);
@@ -1104,7 +1121,7 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
 	new_opp->available = true;
 	new_opp->dynamic = dynamic;
 
-	ret = _opp_add(dev, new_opp, opp_table);
+	ret = _opp_add(dev, new_opp, opp_table, false);
 	if (ret) {
 		/* Don't return error for duplicate OPPs */
 		if (ret == -EBUSY)
@@ -1140,7 +1157,6 @@ struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
 			const u32 *versions, unsigned int count)
 {
 	struct opp_table *opp_table;
-	int ret;
 
 	opp_table = dev_pm_opp_get_opp_table(dev);
 	if (!opp_table)
@@ -1149,29 +1165,20 @@ struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
 	/* Make sure there are no concurrent readers while updating opp_table */
 	WARN_ON(!list_empty(&opp_table->opp_list));
 
-	/* Do we already have a version hierarchy associated with opp_table? */
-	if (opp_table->supported_hw) {
-		dev_err(dev, "%s: Already have supported hardware list\n",
-			__func__);
-		ret = -EBUSY;
-		goto err;
-	}
+	/* Another CPU that shares the OPP table has set the property ? */
+	if (opp_table->supported_hw)
+		return opp_table;
 
 	opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
 					GFP_KERNEL);
 	if (!opp_table->supported_hw) {
-		ret = -ENOMEM;
-		goto err;
+		dev_pm_opp_put_opp_table(opp_table);
+		return ERR_PTR(-ENOMEM);
 	}
 
 	opp_table->supported_hw_count = count;
 
 	return opp_table;
-
-err:
-	dev_pm_opp_put_opp_table(opp_table);
-
-	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
 
@@ -1188,12 +1195,6 @@ void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
 	/* Make sure there are no concurrent readers while updating opp_table */
 	WARN_ON(!list_empty(&opp_table->opp_list));
 
-	if (!opp_table->supported_hw) {
-		pr_err("%s: Doesn't have supported hardware list\n",
-		       __func__);
-		return;
-	}
-
 	kfree(opp_table->supported_hw);
 	opp_table->supported_hw = NULL;
 	opp_table->supported_hw_count = 0;
@@ -1215,7 +1216,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
 struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
 {
 	struct opp_table *opp_table;
-	int ret;
 
 	opp_table = dev_pm_opp_get_opp_table(dev);
 	if (!opp_table)
@@ -1224,26 +1224,17 @@ struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
 	/* Make sure there are no concurrent readers while updating opp_table */
 	WARN_ON(!list_empty(&opp_table->opp_list));
 
-	/* Do we already have a prop-name associated with opp_table? */
-	if (opp_table->prop_name) {
-		dev_err(dev, "%s: Already have prop-name %s\n", __func__,
-			opp_table->prop_name);
-		ret = -EBUSY;
-		goto err;
-	}
+	/* Another CPU that shares the OPP table has set the property ? */
+	if (opp_table->prop_name)
+		return opp_table;
 
 	opp_table->prop_name = kstrdup(name, GFP_KERNEL);
 	if (!opp_table->prop_name) {
-		ret = -ENOMEM;
-		goto err;
+		dev_pm_opp_put_opp_table(opp_table);
+		return ERR_PTR(-ENOMEM);
 	}
 
 	return opp_table;
-
-err:
-	dev_pm_opp_put_opp_table(opp_table);
-
-	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
 
@@ -1260,11 +1251,6 @@ void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
 	/* Make sure there are no concurrent readers while updating opp_table */
 	WARN_ON(!list_empty(&opp_table->opp_list));
 
-	if (!opp_table->prop_name) {
-		pr_err("%s: Doesn't have a prop-name\n", __func__);
-		return;
-	}
-
 	kfree(opp_table->prop_name);
 	opp_table->prop_name = NULL;
 
@@ -1334,11 +1320,9 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
 		goto err;
 	}
 
-	/* Already have regulators set */
-	if (opp_table->regulators) {
-		ret = -EBUSY;
-		goto err;
-	}
+	/* Another CPU that shares the OPP table has set the regulators ? */
+	if (opp_table->regulators)
+		return opp_table;
 
 	opp_table->regulators = kmalloc_array(count,
 					      sizeof(*opp_table->regulators),
@@ -1392,10 +1376,8 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
 {
 	int i;
 
-	if (!opp_table->regulators) {
-		pr_err("%s: Doesn't have regulators set\n", __func__);
-		return;
-	}
+	if (!opp_table->regulators)
+		goto put_opp_table;
 
 	/* Make sure there are no concurrent readers while updating opp_table */
 	WARN_ON(!list_empty(&opp_table->opp_list));
@@ -1409,6 +1391,7 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
 	opp_table->regulators = NULL;
 	opp_table->regulator_count = 0;
 
+put_opp_table:
 	dev_pm_opp_put_opp_table(opp_table);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
@@ -1494,7 +1477,6 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
 			int (*set_opp)(struct dev_pm_set_opp_data *data))
 {
 	struct opp_table *opp_table;
-	int ret;
 
 	if (!set_opp)
 		return ERR_PTR(-EINVAL);
@@ -1505,24 +1487,15 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
 
 	/* This should be called before OPPs are initialized */
 	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
-		ret = -EBUSY;
-		goto err;
-	}
-
-	/* Already have custom set_opp helper */
-	if (WARN_ON(opp_table->set_opp)) {
-		ret = -EBUSY;
-		goto err;
+		dev_pm_opp_put_opp_table(opp_table);
+		return ERR_PTR(-EBUSY);
 	}
 
-	opp_table->set_opp = set_opp;
+	/* Another CPU that shares the OPP table has set the helper ? */
+	if (!opp_table->set_opp)
+		opp_table->set_opp = set_opp;
 
 	return opp_table;
-
-err:
-	dev_pm_opp_put_opp_table(opp_table);
-
-	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
 
@@ -1535,96 +1508,14 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
 */
 void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
 {
-	if (!opp_table->set_opp) {
-		pr_err("%s: Doesn't have custom set_opp helper set\n",
-		       __func__);
-		return;
-	}
-
 	/* Make sure there are no concurrent readers while updating opp_table */
 	WARN_ON(!list_empty(&opp_table->opp_list));
 
 	opp_table->set_opp = NULL;
-
 	dev_pm_opp_put_opp_table(opp_table);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
 
-/**
- * dev_pm_opp_register_get_pstate_helper() - Register get_pstate() helper.
- * @dev: Device for which the helper is getting registered.
- * @get_pstate: Helper.
- *
- * TODO: Remove this callback after the same information is available via Device
- * Tree.
- *
- * This allows a platform to initialize the performance states of individual
- * OPPs for its devices, until we get similar information directly from DT.
- *
- * This must be called before the OPPs are initialized for the device.
- */
-struct opp_table *dev_pm_opp_register_get_pstate_helper(struct device *dev,
-		int (*get_pstate)(struct device *dev, unsigned long rate))
-{
-	struct opp_table *opp_table;
-	int ret;
-
-	if (!get_pstate)
-		return ERR_PTR(-EINVAL);
-
-	opp_table = dev_pm_opp_get_opp_table(dev);
-	if (!opp_table)
-		return ERR_PTR(-ENOMEM);
-
-	/* This should be called before OPPs are initialized */
-	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
-		ret = -EBUSY;
-		goto err;
-	}
-
-	/* Already have genpd_performance_state set */
-	if (WARN_ON(opp_table->genpd_performance_state)) {
-		ret = -EBUSY;
-		goto err;
-	}
-
-	opp_table->genpd_performance_state = true;
-	opp_table->get_pstate = get_pstate;
-
-	return opp_table;
-
-err:
-	dev_pm_opp_put_opp_table(opp_table);
-
-	return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_register_get_pstate_helper);
-
-/**
- * dev_pm_opp_unregister_get_pstate_helper() - Releases resources blocked for
- *					       get_pstate() helper
- * @opp_table: OPP table returned from dev_pm_opp_register_get_pstate_helper().
- *
- * Release resources blocked for platform specific get_pstate() helper.
- */
-void dev_pm_opp_unregister_get_pstate_helper(struct opp_table *opp_table)
-{
-	if (!opp_table->genpd_performance_state) {
-		pr_err("%s: Doesn't have performance states set\n",
-		       __func__);
-		return;
-	}
-
-	/* Make sure there are no concurrent readers while updating opp_table */
-	WARN_ON(!list_empty(&opp_table->opp_list));
-
-	opp_table->genpd_performance_state = false;
-	opp_table->get_pstate = NULL;
-
-	dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_get_pstate_helper);
-
 /**
  * dev_pm_opp_add() - Add an OPP table from a table definitions
  * @dev:	device for which we do this operation