@@ -21,6 +21,8 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 
+#include "clk.h"
+
 static DEFINE_SPINLOCK(enable_lock);
 static DEFINE_MUTEX(prepare_lock);
 
@@ -350,6 +352,21 @@ out:
 	return ret;
 }
 
+/**
+ * clk_debug_unregister - remove a clk node from the debugfs clk tree
+ * @clk: the clk being removed from the debugfs clk tree
+ *
+ * Dynamically removes a clk and all its child clk nodes from the
+ * debugfs clk tree if clk->dentry points to debugfs created by
+ * clk_debug_register in __clk_init.
+ *
+ * Caller must hold prepare_lock.
+ */
+static void clk_debug_unregister(struct clk *clk)
+{
+	debugfs_remove_recursive(clk->dentry);
+}
+
 /**
  * clk_debug_reparent - reparent clk node in the debugfs clk tree
  * @clk: the clk being reparented
@@ -440,6 +457,9 @@ static inline int clk_debug_register(struct clk *clk) { return 0; }
 static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
 {
 }
+static inline void clk_debug_unregister(struct clk *clk)
+{
+}
 #endif
 
 /* caller must hold prepare_lock */
@@ -1861,6 +1881,7 @@ int __clk_init(struct device *dev, struct clk *clk)
 	if (clk->ops->init)
 		clk->ops->init(clk->hw);
 
+	kref_init(&clk->ref);
 out:
 	clk_prepare_unlock();
 
@@ -1896,6 +1917,10 @@ struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
 	clk->flags = hw->init->flags;
 	clk->parent_names = hw->init->parent_names;
 	clk->num_parents = hw->init->num_parents;
+	if (dev && dev->driver)
+		clk->owner = dev->driver->owner;
+	else
+		clk->owner = NULL;
 
 	ret = __clk_init(dev, clk);
 	if (ret)
@@ -1916,6 +1941,8 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
 		goto fail_name;
 	}
 	clk->ops = hw->init->ops;
+	if (dev && dev->driver)
+		clk->owner = dev->driver->owner;
 	clk->hw = hw;
 	clk->flags = hw->init->flags;
 	clk->num_parents = hw->init->num_parents;
@@ -1990,13 +2017,104 @@ fail_out:
 }
 EXPORT_SYMBOL_GPL(clk_register);
 
+/*
+ * Free memory allocated for a clock.
+ * Caller must hold prepare_lock.
+ */
+static void __clk_release(struct kref *ref)
+{
+	struct clk *clk = container_of(ref, struct clk, ref);
+	int i = clk->num_parents;
+
+	kfree(clk->parents);
+	while (--i >= 0)
+		kfree(clk->parent_names[i]);
+
+	kfree(clk->parent_names);
+	kfree(clk->name);
+	kfree(clk);
+}
+
+/*
+ * Empty clk_ops for unregistered clocks. These are used temporarily
+ * after clk_unregister() was called on a clock and until the last clock
+ * consumer calls clk_put() and the struct clk object is freed.
+ */
+static int clk_nodrv_prepare_enable(struct clk_hw *hw)
+{
+	return -ENXIO;
+}
+
+static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
+{
+	WARN_ON_ONCE(1);
+}
+
+static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
+					unsigned long parent_rate)
+{
+	return -ENXIO;
+}
+
+static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
+{
+	return -ENXIO;
+}
+
+static const struct clk_ops clk_nodrv_ops = {
+	.enable		= clk_nodrv_prepare_enable,
+	.disable	= clk_nodrv_disable_unprepare,
+	.prepare	= clk_nodrv_prepare_enable,
+	.unprepare	= clk_nodrv_disable_unprepare,
+	.set_rate	= clk_nodrv_set_rate,
+	.set_parent	= clk_nodrv_set_parent,
+};
+
 /**
  * clk_unregister - unregister a currently registered clock
  * @clk: clock to unregister
- *
- * Currently unimplemented.
  */
-void clk_unregister(struct clk *clk) {}
+void clk_unregister(struct clk *clk)
+{
+	unsigned long flags;
+
+	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
+		return;
+
+	clk_prepare_lock();
+
+	if (clk->ops == &clk_nodrv_ops) {
+		pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
+		goto out;
+	}
+	/*
+	 * Assign empty clock ops for consumers that might still hold
+	 * a reference to this clock.
+	 */
+	flags = clk_enable_lock();
+	clk->ops = &clk_nodrv_ops;
+	clk_enable_unlock(flags);
+
+	if (!hlist_empty(&clk->children)) {
+		struct clk *child;
+
+		/* Reparent all children to the orphan list. */
+		hlist_for_each_entry(child, &clk->children, child_node)
+			clk_set_parent(child, NULL);
+	}
+
+	clk_debug_unregister(clk);
+
+	hlist_del_init(&clk->child_node);
+
+	if (clk->prepare_count)
+		pr_warn("%s: unregistering prepared clock: %s\n",
+					__func__, clk->name);
+
+	kref_put(&clk->ref, __clk_release);
+out:
+	clk_prepare_unlock();
+}
 EXPORT_SYMBOL_GPL(clk_unregister);
 
 static void devm_clk_release(struct device *dev, void *res)
@@ -2056,6 +2174,31 @@ void devm_clk_unregister(struct device *dev, struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(devm_clk_unregister);
 
+/*
+ * clkdev helpers
+ */
+int __clk_get(struct clk *clk)
+{
+	if (clk && !try_module_get(clk->owner))
+		return 0;
+
+	kref_get(&clk->ref);
+	return 1;
+}
+
+void __clk_put(struct clk *clk)
+{
+	if (WARN_ON_ONCE(IS_ERR(clk)))
+		return;
+
+	clk_prepare_lock();
+	kref_put(&clk->ref, __clk_release);
+	clk_prepare_unlock();
+
+	if (clk)
+		module_put(clk->owner);
+}
+
 /*** clk rate change notifiers ***/
 
 /**
@@ -2196,7 +2339,18 @@ static const struct of_device_id __clk_of_table_sentinel
 	__used __section(__clk_of_table_end);
 
 static LIST_HEAD(of_clk_providers);
-static DEFINE_MUTEX(of_clk_lock);
+static DEFINE_MUTEX(of_clk_mutex);
+
+/* of_clk_provider list locking helpers */
+void of_clk_lock(void)
+{
+	mutex_lock(&of_clk_mutex);
+}
+
+void of_clk_unlock(void)
+{
+	mutex_unlock(&of_clk_mutex);
+}
 
 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
 				  void *data)
@@ -2240,9 +2394,9 @@ int of_clk_add_provider(struct device_node *np,
 	cp->data = data;
 	cp->get = clk_src_get;
 
-	mutex_lock(&of_clk_lock);
+	mutex_lock(&of_clk_mutex);
 	list_add(&cp->link, &of_clk_providers);
-	mutex_unlock(&of_clk_lock);
+	mutex_unlock(&of_clk_mutex);
 	pr_debug("Added clock from %s\n", np->full_name);
 
 	return 0;
@@ -2257,7 +2411,7 @@ void of_clk_del_provider(struct device_node *np)
 {
 	struct of_clk_provider *cp;
 
-	mutex_lock(&of_clk_lock);
+	mutex_lock(&of_clk_mutex);
 	list_for_each_entry(cp, &of_clk_providers, link) {
 		if (cp->node == np) {
 			list_del(&cp->link);
@@ -2266,24 +2420,33 @@ void of_clk_del_provider(struct device_node *np)
 			break;
 		}
 	}
-	mutex_unlock(&of_clk_lock);
+	mutex_unlock(&of_clk_mutex);
 }
 EXPORT_SYMBOL_GPL(of_clk_del_provider);
 
-struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
+struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
 {
 	struct of_clk_provider *provider;
 	struct clk *clk = ERR_PTR(-ENOENT);
 
 	/* Check if we have such a provider in our array */
-	mutex_lock(&of_clk_lock);
 	list_for_each_entry(provider, &of_clk_providers, link) {
 		if (provider->node == clkspec->np)
 			clk = provider->get(clkspec, provider->data);
 		if (!IS_ERR(clk))
 			break;
 	}
-	mutex_unlock(&of_clk_lock);
+
+	return clk;
+}
+
+struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
+{
+	struct clk *clk;
+
+	mutex_lock(&of_clk_mutex);
+	clk = __of_clk_get_from_provider(clkspec);
+	mutex_unlock(&of_clk_mutex);
 
 	return clk;
 }
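
Usage sketch (not part of the patch above): the snippet below shows how a provider driver might pair clk_register() with the newly implemented clk_unregister(). The "foo-clk" driver, the fixed-rate clock and its "foo_root" name are hypothetical; only the clk_register()/clk_unregister() pairing and the clk->owner/kref behaviour come from the patch.

/*
 * Hypothetical example, assuming a minimal fixed-rate provider driver.
 * With this patch, clk->owner is set from dev->driver->owner at
 * registration, so consumers that still hold the clock pin this module
 * via __clk_get(); clk_unregister() swaps in clk_nodrv_ops so late
 * prepare/enable calls fail with -ENXIO, and __clk_release() frees the
 * struct clk only after the last clk_put() drops the kref.
 */
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_clk_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* 100 MHz root clock with no parent; the name is illustrative. */
	clk = clk_register_fixed_rate(&pdev->dev, "foo_root", NULL, 0,
				      100000000);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	platform_set_drvdata(pdev, clk);
	return 0;
}

static int foo_clk_remove(struct platform_device *pdev)
{
	/*
	 * Tear down on driver detach: children are reparented to the
	 * orphan list and remaining consumers see clk_nodrv_ops.
	 */
	clk_unregister(platform_get_drvdata(pdev));
	return 0;
}

static struct platform_driver foo_clk_driver = {
	.driver = {
		.name = "foo-clk",
	},
	.probe = foo_clk_probe,
	.remove = foo_clk_remove,
};
module_platform_driver(foo_clk_driver);
MODULE_LICENSE("GPL");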