@@ -559,6 +559,9 @@ static void _remove_device_opp(struct device_opp *dev_opp)
 	if (!list_empty(&dev_opp->opp_list))
 		return;
 
+	if (dev_opp->supported_hw)
+		return;
+
 	list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
 				    node);
 
@@ -833,6 +836,145 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
 	return 0;
 }
 
+/**
+ * dev_pm_opp_set_supported_hw() - Set supported platforms
+ * @dev: Device for which supported-hw has to be set.
+ * @versions: Array of hierarchy of versions to match.
+ * @count: Number of elements in the array.
+ *
+ * This is required only for the V2 bindings, and it enables a platform to
+ * specify the hierarchy of versions it supports. The OPP layer will then
+ * enable only those OPPs that are available for these versions, based on
+ * each OPP's 'opp-supported-hw' property.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses the RCU updater strategy with mutex
+ * locks to keep the integrity of the internal data structures. Callers
+ * should ensure that this function is *NOT* called under RCU protection or
+ * in contexts where a mutex cannot be locked.
+ */
+int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
+				unsigned int count)
+{
+	struct device_opp *dev_opp;
+	int ret = 0;
+
+	/* Hold our list modification lock here */
+	mutex_lock(&dev_opp_list_lock);
+
+	dev_opp = _add_device_opp(dev);
+	if (!dev_opp) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	/* Make sure there are no concurrent readers while updating dev_opp */
+	WARN_ON(!list_empty(&dev_opp->opp_list));
+
+	/* Do we already have a version hierarchy associated with dev_opp? */
+	if (dev_opp->supported_hw) {
+		dev_err(dev, "%s: Already have supported hardware list\n",
+			__func__);
+		ret = -EBUSY;
+		goto err;
+	}
+
+	dev_opp->supported_hw = kmemdup(versions, count * sizeof(*versions),
+					GFP_KERNEL);
+	if (!dev_opp->supported_hw) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	dev_opp->supported_hw_count = count;
+	mutex_unlock(&dev_opp_list_lock);
+	return 0;
+
+err:
+	_remove_device_opp(dev_opp);
+unlock:
+	mutex_unlock(&dev_opp_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
+
+/**
+ * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
+ * @dev: Device for which supported-hw has to be put.
+ *
+ * This is required only for the V2 bindings, and it pairs with an earlier
+ * dev_pm_opp_set_supported_hw() call. Until this is called, the device_opp
+ * structure will not be freed.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses the RCU updater strategy with mutex
+ * locks to keep the integrity of the internal data structures. Callers
+ * should ensure that this function is *NOT* called under RCU protection or
+ * in contexts where a mutex cannot be locked.
+ */
+void dev_pm_opp_put_supported_hw(struct device *dev)
+{
+	struct device_opp *dev_opp;
+
+	/* Hold our list modification lock here */
+	mutex_lock(&dev_opp_list_lock);
+
+	/* Check for existing list for 'dev' first */
+	dev_opp = _find_device_opp(dev);
+	if (IS_ERR(dev_opp)) {
+		dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
+		goto unlock;
+	}
+
+	/* Make sure there are no concurrent readers while updating dev_opp */
+	WARN_ON(!list_empty(&dev_opp->opp_list));
+
+	if (!dev_opp->supported_hw) {
+		dev_err(dev, "%s: Doesn't have supported hardware list\n",
+			__func__);
+		goto unlock;
+	}
+
+	kfree(dev_opp->supported_hw);
+	dev_opp->supported_hw = NULL;
+	dev_opp->supported_hw_count = 0;
+
+	/* Try freeing device_opp if this was the last blocking resource */
+	_remove_device_opp(dev_opp);
+
+unlock:
+	mutex_unlock(&dev_opp_list_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
+
+static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
+			      struct device_node *np)
+{
+	unsigned int count = dev_opp->supported_hw_count;
+	u32 version;
+	int ret;
+
+	if (!dev_opp->supported_hw)
+		return true;
+
+	while (count--) {
+		ret = of_property_read_u32_index(np, "opp-supported-hw", count,
+						 &version);
+		if (ret) {
+			dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
+				 __func__, count, ret);
+			return false;
+		}
+
+		/* Both of these are bitwise masks of the versions */
+		if (!(version & dev_opp->supported_hw[count]))
+			return false;
+	}
+
+	return true;
+}
+
 /**
  * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
  * @dev: device for which we do this operation
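
For reviewers, the intended calling pattern is for platform code to register
its version hierarchy before the OPP table is parsed, and to drop it again
on the error/teardown path. A minimal sketch, assuming the v2 table helper
dev_pm_opp_of_add_table() and the pm_opp.h declarations added elsewhere in
this series; the foo_* name and the fuse-derived values are invented:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>

/* Hypothetical platform init: register the version hierarchy, then the table */
static int foo_init_opp_table(struct device *cpu_dev)
{
	/* One cell per level of the hierarchy; each cell is a bitmask */
	u32 versions[] = { BIT(2), BIT(0) };	/* e.g. cut 2, version 0 */
	int ret;

	ret = dev_pm_opp_set_supported_hw(cpu_dev, versions,
					  ARRAY_SIZE(versions));
	if (ret)
		return ret;

	/* OPPs whose 'opp-supported-hw' doesn't match are now skipped */
	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (ret)
		dev_pm_opp_put_supported_hw(cpu_dev);

	return ret;
}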
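The matching rule applied by _opp_is_supported() in the hunk below deserves
a concrete example: the 'opp-supported-hw' cells and the platform's versions
array are both bitmasks, compared level by level, and every level must share
at least one set bit. A standalone sketch of just that test, with invented
values for a two-level <cut version> hierarchy:

#include <linux/types.h>

/* Illustration only: mirrors the level-by-level bitmask test above */
static bool versions_match(const u32 *opp_hw, const u32 *plat_hw,
			   unsigned int count)
{
	while (count--)
		if (!(opp_hw[count] & plat_hw[count]))
			return false;	/* no common bit at this level */
	return true;
}

/*
 * With opp-supported-hw = <0x6 0x1> in DT and the platform passing
 * { 0x2, 0x1 }: 0x6 & 0x2 and 0x1 & 0x1 are both non-zero, so the OPP
 * is enabled. Passing { 0x8, 0x1 } instead fails at the first level and
 * _opp_add_static_v2() silently skips the OPP (dev_dbg only).
 */
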
@@ -879,6 +1021,12 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
 		goto free_opp;
 	}
 
+	/* Check if the OPP supports hardware's hierarchy of versions or not */
+	if (!_opp_is_supported(dev, dev_opp, np)) {
+		dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
+		goto free_opp;
+	}
+
 	/*
 	 * Rate is defined as an unsigned long in clk API, and so casting
 	 * explicitly to its type. Must be fixed once rate is 64 bit