@@ -37,13 +37,6 @@ static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

-static long clk_core_get_accuracy(struct clk_core *core);
-static unsigned long clk_core_get_rate(struct clk_core *core);
-static int clk_core_get_phase(struct clk_core *core);
-static bool clk_core_is_prepared(struct clk_core *core);
-static bool clk_core_is_enabled(struct clk_core *core);
-static struct clk_core *clk_core_lookup(const char *name);
-
/*** private data structures ***/

struct clk_core {
@@ -145,2093 +138,2075 @@ static void clk_enable_unlock(unsigned long flags)
	spin_unlock_irqrestore(&enable_lock, flags);
}

-/*** debugfs support ***/
-
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
+static bool clk_core_is_prepared(struct clk_core *core)
+{
+	/*
+	 * .is_prepared is optional for clocks that can prepare
+	 * fall back to software usage counter if it is missing
+	 */
+	if (!core->ops->is_prepared)
+		return core->prepare_count;

-static struct dentry *rootdir;
-static int inited = 0;
-static DEFINE_MUTEX(clk_debug_lock);
-static HLIST_HEAD(clk_debug_list);
+	return core->ops->is_prepared(core->hw);
+}

-static struct hlist_head *all_lists[] = {
-	&clk_root_list,
-	&clk_orphan_list,
-	NULL,
-};
+static bool clk_core_is_enabled(struct clk_core *core)
+{
+	/*
+	 * .is_enabled is only mandatory for clocks that gate
+	 * fall back to software usage counter if .is_enabled is missing
+	 */
+	if (!core->ops->is_enabled)
+		return core->enable_count;

-static struct hlist_head *orphan_list[] = {
-	&clk_orphan_list,
-	NULL,
-};
+	return core->ops->is_enabled(core->hw);
+}
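
As a minimal provider-side sketch (hypothetical foo_gate driver; types come from <linux/clk-provider.h> and <linux/io.h>, register and bit are assumptions), this is the kind of .is_enabled callback the two helpers above prefer over the software counters:

struct foo_gate {
	struct clk_hw hw;
	void __iomem *reg;	/* hypothetical gate register */
	u8 bit_idx;
};

static int foo_gate_is_enabled(struct clk_hw *hw)
{
	struct foo_gate *gate = container_of(hw, struct foo_gate, hw);

	/* report real hardware state so the core need not trust enable_count */
	return !!(readl(gate->reg) & BIT(gate->bit_idx));
}
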
-static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
|
|
|
- int level)
|
|
|
+/* caller must hold prepare_lock */
|
|
|
+static void clk_unprepare_unused_subtree(struct clk_core *core)
|
|
|
{
|
|
|
- if (!c)
|
|
|
+ struct clk_core *child;
|
|
|
+
|
|
|
+ lockdep_assert_held(&prepare_lock);
|
|
|
+
|
|
|
+ hlist_for_each_entry(child, &core->children, child_node)
|
|
|
+ clk_unprepare_unused_subtree(child);
|
|
|
+
|
|
|
+ if (core->prepare_count)
|
|
|
return;
|
|
|
|
|
|
- seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
|
|
|
- level * 3 + 1, "",
|
|
|
- 30 - level * 3, c->name,
|
|
|
- c->enable_count, c->prepare_count, clk_core_get_rate(c),
|
|
|
- clk_core_get_accuracy(c), clk_core_get_phase(c));
|
|
|
+ if (core->flags & CLK_IGNORE_UNUSED)
|
|
|
+ return;
|
|
|
+
|
|
|
+ if (clk_core_is_prepared(core)) {
|
|
|
+ trace_clk_unprepare(core);
|
|
|
+ if (core->ops->unprepare_unused)
|
|
|
+ core->ops->unprepare_unused(core->hw);
|
|
|
+ else if (core->ops->unprepare)
|
|
|
+ core->ops->unprepare(core->hw);
|
|
|
+ trace_clk_unprepare_complete(core);
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
-static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
|
|
|
- int level)
|
|
|
+/* caller must hold prepare_lock */
|
|
|
+static void clk_disable_unused_subtree(struct clk_core *core)
|
|
|
{
|
|
|
struct clk_core *child;
|
|
|
+ unsigned long flags;
|
|
|
|
|
|
- if (!c)
|
|
|
- return;
|
|
|
+ lockdep_assert_held(&prepare_lock);
|
|
|
|
|
|
- clk_summary_show_one(s, c, level);
|
|
|
+ hlist_for_each_entry(child, &core->children, child_node)
|
|
|
+ clk_disable_unused_subtree(child);
|
|
|
|
|
|
- hlist_for_each_entry(child, &c->children, child_node)
|
|
|
- clk_summary_show_subtree(s, child, level + 1);
|
|
|
+ flags = clk_enable_lock();
|
|
|
+
|
|
|
+ if (core->enable_count)
|
|
|
+ goto unlock_out;
|
|
|
+
|
|
|
+ if (core->flags & CLK_IGNORE_UNUSED)
|
|
|
+ goto unlock_out;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * some gate clocks have special needs during the disable-unused
|
|
|
+ * sequence. call .disable_unused if available, otherwise fall
|
|
|
+ * back to .disable
|
|
|
+ */
|
|
|
+ if (clk_core_is_enabled(core)) {
|
|
|
+ trace_clk_disable(core);
|
|
|
+ if (core->ops->disable_unused)
|
|
|
+ core->ops->disable_unused(core->hw);
|
|
|
+ else if (core->ops->disable)
|
|
|
+ core->ops->disable(core->hw);
|
|
|
+ trace_clk_disable_complete(core);
|
|
|
+ }
|
|
|
+
|
|
|
+unlock_out:
|
|
|
+ clk_enable_unlock(flags);
|
|
|
}
|
|
|
|
|
|
-static int clk_summary_show(struct seq_file *s, void *data)
|
|
|
+static bool clk_ignore_unused;
|
|
|
+static int __init clk_ignore_unused_setup(char *__unused)
|
|
|
{
|
|
|
- struct clk_core *c;
|
|
|
- struct hlist_head **lists = (struct hlist_head **)s->private;
|
|
|
+ clk_ignore_unused = true;
|
|
|
+ return 1;
|
|
|
+}
|
|
|
+__setup("clk_ignore_unused", clk_ignore_unused_setup);
|
|
|
|
|
|
- seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n");
|
|
|
- seq_puts(s, "----------------------------------------------------------------------------------------\n");
|
|
|
+static int clk_disable_unused(void)
|
|
|
+{
|
|
|
+ struct clk_core *core;
|
|
|
+
|
|
|
+ if (clk_ignore_unused) {
|
|
|
+ pr_warn("clk: Not disabling unused clocks\n");
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
|
|
|
clk_prepare_lock();
|
|
|
|
|
|
- for (; *lists; lists++)
|
|
|
- hlist_for_each_entry(c, *lists, child_node)
|
|
|
- clk_summary_show_subtree(s, c, 0);
|
|
|
+ hlist_for_each_entry(core, &clk_root_list, child_node)
|
|
|
+ clk_disable_unused_subtree(core);
|
|
|
+
|
|
|
+ hlist_for_each_entry(core, &clk_orphan_list, child_node)
|
|
|
+ clk_disable_unused_subtree(core);
|
|
|
+
|
|
|
+ hlist_for_each_entry(core, &clk_root_list, child_node)
|
|
|
+ clk_unprepare_unused_subtree(core);
|
|
|
+
|
|
|
+ hlist_for_each_entry(core, &clk_orphan_list, child_node)
|
|
|
+ clk_unprepare_unused_subtree(core);
|
|
|
|
|
|
clk_prepare_unlock();
|
|
|
|
|
|
return 0;
|
|
|
}
|
|
|
+late_initcall_sync(clk_disable_unused);
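
For reference, a clock can opt out of this late-boot sweep individually via CLK_IGNORE_UNUSED (checked in both subtree walks above); a minimal sketch with hypothetical names, using clk_gate_ops from <linux/clk-provider.h>:

static const struct clk_init_data foo_dbg_init = {
	.name	= "foo_dbg_clk",
	.ops	= &clk_gate_ops,
	.flags	= CLK_IGNORE_UNUSED,	/* skipped by clk_disable_unused() */
};
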
+/*** helper functions ***/
|
|
|
|
|
|
-static int clk_summary_open(struct inode *inode, struct file *file)
|
|
|
+const char *__clk_get_name(struct clk *clk)
|
|
|
{
|
|
|
- return single_open(file, clk_summary_show, inode->i_private);
|
|
|
+ return !clk ? NULL : clk->core->name;
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(__clk_get_name);
|
|
|
|
|
|
-static const struct file_operations clk_summary_fops = {
|
|
|
- .open = clk_summary_open,
|
|
|
- .read = seq_read,
|
|
|
- .llseek = seq_lseek,
|
|
|
- .release = single_release,
|
|
|
-};
|
|
|
+struct clk_hw *__clk_get_hw(struct clk *clk)
|
|
|
+{
|
|
|
+ return !clk ? NULL : clk->core->hw;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(__clk_get_hw);
|
|
|
|
|
|
-static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
|
|
|
+u8 __clk_get_num_parents(struct clk *clk)
|
|
|
{
|
|
|
- if (!c)
|
|
|
- return;
|
|
|
+ return !clk ? 0 : clk->core->num_parents;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(__clk_get_num_parents);
|
|
|
|
|
|
- seq_printf(s, "\"%s\": { ", c->name);
|
|
|
- seq_printf(s, "\"enable_count\": %d,", c->enable_count);
|
|
|
- seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
|
|
|
- seq_printf(s, "\"rate\": %lu", clk_core_get_rate(c));
|
|
|
- seq_printf(s, "\"accuracy\": %lu", clk_core_get_accuracy(c));
|
|
|
- seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
|
|
|
+struct clk *__clk_get_parent(struct clk *clk)
|
|
|
+{
|
|
|
+ if (!clk)
|
|
|
+ return NULL;
|
|
|
+
|
|
|
+ /* TODO: Create a per-user clk and change callers to call clk_put */
|
|
|
+ return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(__clk_get_parent);
|
|
|
|
|
|
-static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
|
|
|
+static struct clk_core *__clk_lookup_subtree(const char *name,
|
|
|
+ struct clk_core *core)
|
|
|
{
|
|
|
struct clk_core *child;
|
|
|
+ struct clk_core *ret;
|
|
|
|
|
|
- if (!c)
|
|
|
- return;
|
|
|
-
|
|
|
- clk_dump_one(s, c, level);
|
|
|
+ if (!strcmp(core->name, name))
|
|
|
+ return core;
|
|
|
|
|
|
- hlist_for_each_entry(child, &c->children, child_node) {
|
|
|
- seq_printf(s, ",");
|
|
|
- clk_dump_subtree(s, child, level + 1);
|
|
|
+ hlist_for_each_entry(child, &core->children, child_node) {
|
|
|
+ ret = __clk_lookup_subtree(name, child);
|
|
|
+ if (ret)
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
- seq_printf(s, "}");
|
|
|
+ return NULL;
|
|
|
}
|
|
|
|
|
|
-static int clk_dump(struct seq_file *s, void *data)
|
|
|
+static struct clk_core *clk_core_lookup(const char *name)
|
|
|
{
|
|
|
- struct clk_core *c;
|
|
|
- bool first_node = true;
|
|
|
- struct hlist_head **lists = (struct hlist_head **)s->private;
|
|
|
-
|
|
|
- seq_printf(s, "{");
|
|
|
+ struct clk_core *root_clk;
|
|
|
+ struct clk_core *ret;
|
|
|
|
|
|
- clk_prepare_lock();
|
|
|
+ if (!name)
|
|
|
+ return NULL;
|
|
|
|
|
|
- for (; *lists; lists++) {
|
|
|
- hlist_for_each_entry(c, *lists, child_node) {
|
|
|
- if (!first_node)
|
|
|
- seq_puts(s, ",");
|
|
|
- first_node = false;
|
|
|
- clk_dump_subtree(s, c, 0);
|
|
|
- }
|
|
|
+ /* search the 'proper' clk tree first */
|
|
|
+ hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
|
|
|
+ ret = __clk_lookup_subtree(name, root_clk);
|
|
|
+ if (ret)
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
- clk_prepare_unlock();
|
|
|
+ /* if not found, then search the orphan tree */
|
|
|
+ hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
|
|
|
+ ret = __clk_lookup_subtree(name, root_clk);
|
|
|
+ if (ret)
|
|
|
+ return ret;
|
|
|
+ }
|
|
|
|
|
|
- seq_printf(s, "}");
|
|
|
- return 0;
|
|
|
+ return NULL;
|
|
|
}
|
|
|
|
|
|
-
|
|
|
-static int clk_dump_open(struct inode *inode, struct file *file)
|
|
|
+static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
|
|
|
+ u8 index)
|
|
|
{
|
|
|
- return single_open(file, clk_dump, inode->i_private);
|
|
|
+ if (!core || index >= core->num_parents)
|
|
|
+ return NULL;
|
|
|
+ else if (!core->parents)
|
|
|
+ return clk_core_lookup(core->parent_names[index]);
|
|
|
+ else if (!core->parents[index])
|
|
|
+ return core->parents[index] =
|
|
|
+ clk_core_lookup(core->parent_names[index]);
|
|
|
+ else
|
|
|
+ return core->parents[index];
|
|
|
}
|
|
|
|
|
|
-static const struct file_operations clk_dump_fops = {
|
|
|
- .open = clk_dump_open,
|
|
|
- .read = seq_read,
|
|
|
- .llseek = seq_lseek,
|
|
|
- .release = single_release,
|
|
|
-};
|
|
|
-
|
|
|
-static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
|
|
|
+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
|
|
|
{
|
|
|
- struct dentry *d;
|
|
|
- int ret = -ENOMEM;
|
|
|
+ struct clk_core *parent;
|
|
|
|
|
|
- if (!core || !pdentry) {
|
|
|
- ret = -EINVAL;
|
|
|
- goto out;
|
|
|
- }
|
|
|
+ if (!clk)
|
|
|
+ return NULL;
|
|
|
|
|
|
- d = debugfs_create_dir(core->name, pdentry);
|
|
|
- if (!d)
|
|
|
- goto out;
|
|
|
-
|
|
|
- core->dentry = d;
|
|
|
-
|
|
|
- d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry,
|
|
|
- (u32 *)&core->rate);
|
|
|
- if (!d)
|
|
|
- goto err_out;
|
|
|
-
|
|
|
- d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry,
|
|
|
- (u32 *)&core->accuracy);
|
|
|
- if (!d)
|
|
|
- goto err_out;
|
|
|
+ parent = clk_core_get_parent_by_index(clk->core, index);
|
|
|
|
|
|
- d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry,
|
|
|
- (u32 *)&core->phase);
|
|
|
- if (!d)
|
|
|
- goto err_out;
|
|
|
+ return !parent ? NULL : parent->hw->clk;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
|
|
|
|
|
|
- d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry,
|
|
|
- (u32 *)&core->flags);
|
|
|
- if (!d)
|
|
|
- goto err_out;
|
|
|
+unsigned int __clk_get_enable_count(struct clk *clk)
|
|
|
+{
|
|
|
+ return !clk ? 0 : clk->core->enable_count;
|
|
|
+}
|
|
|
|
|
|
- d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry,
|
|
|
- (u32 *)&core->prepare_count);
|
|
|
- if (!d)
|
|
|
- goto err_out;
|
|
|
+static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
|
|
|
+{
|
|
|
+ unsigned long ret;
|
|
|
|
|
|
- d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry,
|
|
|
- (u32 *)&core->enable_count);
|
|
|
- if (!d)
|
|
|
- goto err_out;
|
|
|
+ if (!core) {
|
|
|
+ ret = 0;
|
|
|
+ goto out;
|
|
|
+ }
|
|
|
|
|
|
- d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry,
|
|
|
- (u32 *)&core->notifier_count);
|
|
|
- if (!d)
|
|
|
- goto err_out;
|
|
|
+ ret = core->rate;
|
|
|
|
|
|
- if (core->ops->debug_init) {
|
|
|
- ret = core->ops->debug_init(core->hw, core->dentry);
|
|
|
- if (ret)
|
|
|
- goto err_out;
|
|
|
- }
|
|
|
+ if (core->flags & CLK_IS_ROOT)
|
|
|
+ goto out;
|
|
|
|
|
|
- ret = 0;
|
|
|
- goto out;
|
|
|
+ if (!core->parent)
|
|
|
+ ret = 0;
|
|
|
|
|
|
-err_out:
|
|
|
- debugfs_remove_recursive(core->dentry);
|
|
|
- core->dentry = NULL;
|
|
|
out:
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
-/**
|
|
|
- * clk_debug_register - add a clk node to the debugfs clk tree
|
|
|
- * @core: the clk being added to the debugfs clk tree
|
|
|
- *
|
|
|
- * Dynamically adds a clk to the debugfs clk tree if debugfs has been
|
|
|
- * initialized. Otherwise it bails out early since the debugfs clk tree
|
|
|
- * will be created lazily by clk_debug_init as part of a late_initcall.
|
|
|
- */
|
|
|
-static int clk_debug_register(struct clk_core *core)
|
|
|
+unsigned long __clk_get_rate(struct clk *clk)
|
|
|
{
|
|
|
- int ret = 0;
|
|
|
-
|
|
|
- mutex_lock(&clk_debug_lock);
|
|
|
- hlist_add_head(&core->debug_node, &clk_debug_list);
|
|
|
+ if (!clk)
|
|
|
+ return 0;
|
|
|
|
|
|
- if (!inited)
|
|
|
- goto unlock;
|
|
|
+ return clk_core_get_rate_nolock(clk->core);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(__clk_get_rate);
|
|
|
|
|
|
- ret = clk_debug_create_one(core, rootdir);
|
|
|
-unlock:
|
|
|
- mutex_unlock(&clk_debug_lock);
|
|
|
+static unsigned long __clk_get_accuracy(struct clk_core *core)
|
|
|
+{
|
|
|
+ if (!core)
|
|
|
+ return 0;
|
|
|
|
|
|
- return ret;
|
|
|
+ return core->accuracy;
|
|
|
}
|
|
|
|
|
|
- /**
|
|
|
- * clk_debug_unregister - remove a clk node from the debugfs clk tree
|
|
|
- * @core: the clk being removed from the debugfs clk tree
|
|
|
- *
|
|
|
- * Dynamically removes a clk and all it's children clk nodes from the
|
|
|
- * debugfs clk tree if clk->dentry points to debugfs created by
|
|
|
- * clk_debug_register in __clk_init.
|
|
|
- */
|
|
|
-static void clk_debug_unregister(struct clk_core *core)
|
|
|
+unsigned long __clk_get_flags(struct clk *clk)
|
|
|
{
|
|
|
- mutex_lock(&clk_debug_lock);
|
|
|
- hlist_del_init(&core->debug_node);
|
|
|
- debugfs_remove_recursive(core->dentry);
|
|
|
- core->dentry = NULL;
|
|
|
- mutex_unlock(&clk_debug_lock);
|
|
|
+ return !clk ? 0 : clk->core->flags;
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(__clk_get_flags);
|
|
|
|
|
|
-struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
|
|
|
- void *data, const struct file_operations *fops)
|
|
|
+bool __clk_is_prepared(struct clk *clk)
|
|
|
{
|
|
|
- struct dentry *d = NULL;
|
|
|
-
|
|
|
- if (hw->core->dentry)
|
|
|
- d = debugfs_create_file(name, mode, hw->core->dentry, data,
|
|
|
- fops);
|
|
|
+ if (!clk)
|
|
|
+ return false;
|
|
|
|
|
|
- return d;
|
|
|
+ return clk_core_is_prepared(clk->core);
|
|
|
}
|
|
|
-EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
|
|
|
|
|
|
-/**
|
|
|
- * clk_debug_init - lazily create the debugfs clk tree visualization
|
|
|
- *
|
|
|
- * clks are often initialized very early during boot before memory can
|
|
|
- * be dynamically allocated and well before debugfs is setup.
|
|
|
- * clk_debug_init walks the clk tree hierarchy while holding
|
|
|
- * prepare_lock and creates the topology as part of a late_initcall,
|
|
|
- * thus insuring that clks initialized very early will still be
|
|
|
- * represented in the debugfs clk tree. This function should only be
|
|
|
- * called once at boot-time, and all other clks added dynamically will
|
|
|
- * be done so with clk_debug_register.
|
|
|
- */
|
|
|
-static int __init clk_debug_init(void)
|
|
|
+bool __clk_is_enabled(struct clk *clk)
|
|
|
{
|
|
|
- struct clk_core *core;
|
|
|
- struct dentry *d;
|
|
|
-
|
|
|
- rootdir = debugfs_create_dir("clk", NULL);
|
|
|
+ if (!clk)
|
|
|
+ return false;
|
|
|
|
|
|
- if (!rootdir)
|
|
|
- return -ENOMEM;
|
|
|
+ return clk_core_is_enabled(clk->core);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(__clk_is_enabled);
|
|
|
|
|
|
- d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
|
|
|
- &clk_summary_fops);
|
|
|
- if (!d)
|
|
|
- return -ENOMEM;
|
|
|
+static bool mux_is_better_rate(unsigned long rate, unsigned long now,
|
|
|
+ unsigned long best, unsigned long flags)
|
|
|
+{
|
|
|
+ if (flags & CLK_MUX_ROUND_CLOSEST)
|
|
|
+ return abs(now - rate) < abs(best - rate);
|
|
|
|
|
|
- d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
|
|
|
- &clk_dump_fops);
|
|
|
- if (!d)
|
|
|
- return -ENOMEM;
|
|
|
+ return now <= rate && now > best;
|
|
|
+}
|
|
|
|
|
|
- d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
|
|
|
- &orphan_list, &clk_summary_fops);
|
|
|
- if (!d)
|
|
|
- return -ENOMEM;
|
|
|
+static long
|
|
|
+clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate,
|
|
|
+ unsigned long min_rate,
|
|
|
+ unsigned long max_rate,
|
|
|
+ unsigned long *best_parent_rate,
|
|
|
+ struct clk_hw **best_parent_p,
|
|
|
+ unsigned long flags)
|
|
|
+{
|
|
|
+ struct clk_core *core = hw->core, *parent, *best_parent = NULL;
|
|
|
+ int i, num_parents;
|
|
|
+ unsigned long parent_rate, best = 0;
|
|
|
|
|
|
- d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
|
|
|
- &orphan_list, &clk_dump_fops);
|
|
|
- if (!d)
|
|
|
- return -ENOMEM;
|
|
|
+ /* if NO_REPARENT flag set, pass through to current parent */
|
|
|
+ if (core->flags & CLK_SET_RATE_NO_REPARENT) {
|
|
|
+ parent = core->parent;
|
|
|
+ if (core->flags & CLK_SET_RATE_PARENT)
|
|
|
+ best = __clk_determine_rate(parent ? parent->hw : NULL,
|
|
|
+ rate, min_rate, max_rate);
|
|
|
+ else if (parent)
|
|
|
+ best = clk_core_get_rate_nolock(parent);
|
|
|
+ else
|
|
|
+ best = clk_core_get_rate_nolock(core);
|
|
|
+ goto out;
|
|
|
+ }
|
|
|
|
|
|
- mutex_lock(&clk_debug_lock);
|
|
|
- hlist_for_each_entry(core, &clk_debug_list, debug_node)
|
|
|
- clk_debug_create_one(core, rootdir);
|
|
|
+ /* find the parent that can provide the fastest rate <= rate */
|
|
|
+ num_parents = core->num_parents;
|
|
|
+ for (i = 0; i < num_parents; i++) {
|
|
|
+ parent = clk_core_get_parent_by_index(core, i);
|
|
|
+ if (!parent)
|
|
|
+ continue;
|
|
|
+ if (core->flags & CLK_SET_RATE_PARENT)
|
|
|
+ parent_rate = __clk_determine_rate(parent->hw, rate,
|
|
|
+ min_rate,
|
|
|
+ max_rate);
|
|
|
+ else
|
|
|
+ parent_rate = clk_core_get_rate_nolock(parent);
|
|
|
+ if (mux_is_better_rate(rate, parent_rate, best, flags)) {
|
|
|
+ best_parent = parent;
|
|
|
+ best = parent_rate;
|
|
|
+ }
|
|
|
+ }
|
|
|
|
|
|
- inited = 1;
|
|
|
- mutex_unlock(&clk_debug_lock);
|
|
|
+out:
|
|
|
+ if (best_parent)
|
|
|
+ *best_parent_p = best_parent->hw;
|
|
|
+ *best_parent_rate = best;
|
|
|
|
|
|
- return 0;
|
|
|
-}
|
|
|
-late_initcall(clk_debug_init);
|
|
|
-#else
|
|
|
-static inline int clk_debug_register(struct clk_core *core) { return 0; }
|
|
|
-static inline void clk_debug_reparent(struct clk_core *core,
|
|
|
- struct clk_core *new_parent)
|
|
|
-{
|
|
|
+ return best;
|
|
|
}
|
|
|
-static inline void clk_debug_unregister(struct clk_core *core)
|
|
|
+
|
|
|
+struct clk *__clk_lookup(const char *name)
|
|
|
{
|
|
|
+ struct clk_core *core = clk_core_lookup(name);
|
|
|
+
|
|
|
+ return !core ? NULL : core->hw->clk;
|
|
|
}
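
A usage sketch for the lookup above (the "foo_pll" name is hypothetical): provider code occasionally resolves a clock by its globally unique name, searching the root list first and then the orphans:

static struct clk *foo_resolve_pll(void)
{
	/* returns NULL until a clock named "foo_pll" has been registered */
	return __clk_lookup("foo_pll");
}
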
-#endif
|
|
|
|
|
|
-/* caller must hold prepare_lock */
|
|
|
-static void clk_unprepare_unused_subtree(struct clk_core *core)
|
|
|
+static void clk_core_get_boundaries(struct clk_core *core,
|
|
|
+ unsigned long *min_rate,
|
|
|
+ unsigned long *max_rate)
|
|
|
{
|
|
|
- struct clk_core *child;
|
|
|
+ struct clk *clk_user;
|
|
|
|
|
|
- lockdep_assert_held(&prepare_lock);
|
|
|
+ *min_rate = 0;
|
|
|
+ *max_rate = ULONG_MAX;
|
|
|
|
|
|
- hlist_for_each_entry(child, &core->children, child_node)
|
|
|
- clk_unprepare_unused_subtree(child);
|
|
|
+ hlist_for_each_entry(clk_user, &core->clks, clks_node)
|
|
|
+ *min_rate = max(*min_rate, clk_user->min_rate);
|
|
|
|
|
|
- if (core->prepare_count)
|
|
|
- return;
|
|
|
+ hlist_for_each_entry(clk_user, &core->clks, clks_node)
|
|
|
+ *max_rate = min(*max_rate, clk_user->max_rate);
|
|
|
+}
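
The aggregation above intersects every user's range; a minimal consumer sketch, assuming the per-user clk_set_rate_range() API from this same series and hypothetical clock handles:

static void foo_constrain(struct clk *a, struct clk *b)
{
	clk_set_rate_range(a, 100000000, 400000000);	/* user A: 100-400 MHz */
	clk_set_rate_range(b, 200000000, 300000000);	/* user B: 200-300 MHz */
	/* clk_core_get_boundaries() now yields 200000000..300000000 */
}
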
- if (core->flags & CLK_IGNORE_UNUSED)
|
|
|
- return;
|
|
|
-
|
|
|
- if (clk_core_is_prepared(core)) {
|
|
|
- trace_clk_unprepare(core);
|
|
|
- if (core->ops->unprepare_unused)
|
|
|
- core->ops->unprepare_unused(core->hw);
|
|
|
- else if (core->ops->unprepare)
|
|
|
- core->ops->unprepare(core->hw);
|
|
|
- trace_clk_unprepare_complete(core);
|
|
|
- }
|
|
|
+/*
|
|
|
+ * Helper for finding best parent to provide a given frequency. This can be used
|
|
|
+ * directly as a determine_rate callback (e.g. for a mux), or from a more
|
|
|
+ * complex clock that may combine a mux with other operations.
|
|
|
+ */
|
|
|
+long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
|
|
|
+ unsigned long min_rate,
|
|
|
+ unsigned long max_rate,
|
|
|
+ unsigned long *best_parent_rate,
|
|
|
+ struct clk_hw **best_parent_p)
|
|
|
+{
|
|
|
+ return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
|
|
|
+ best_parent_rate,
|
|
|
+ best_parent_p, 0);
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
|
|
|
|
|
|
-/* caller must hold prepare_lock */
|
|
|
-static void clk_disable_unused_subtree(struct clk_core *core)
|
|
|
+long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
|
|
|
+ unsigned long min_rate,
|
|
|
+ unsigned long max_rate,
|
|
|
+ unsigned long *best_parent_rate,
|
|
|
+ struct clk_hw **best_parent_p)
|
|
|
{
|
|
|
- struct clk_core *child;
|
|
|
- unsigned long flags;
|
|
|
+ return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
|
|
|
+ best_parent_rate,
|
|
|
+ best_parent_p,
|
|
|
+ CLK_MUX_ROUND_CLOSEST);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
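
A provider-side sketch of plugging these helpers in directly as the .determine_rate callback, in the signature this patch uses; foo_mux_get_parent/foo_mux_set_parent are hypothetical callbacks with the standard prototypes:

static u8 foo_mux_get_parent(struct clk_hw *hw);
static int foo_mux_set_parent(struct clk_hw *hw, u8 index);

static const struct clk_ops foo_mux_ops = {
	.get_parent	= foo_mux_get_parent,
	.set_parent	= foo_mux_set_parent,
	.determine_rate	= __clk_mux_determine_rate,
};
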
- lockdep_assert_held(&prepare_lock);
|
|
|
+/*** clk api ***/
|
|
|
|
|
|
- hlist_for_each_entry(child, &core->children, child_node)
|
|
|
- clk_disable_unused_subtree(child);
|
|
|
+static void clk_core_unprepare(struct clk_core *core)
|
|
|
+{
|
|
|
+ if (!core)
|
|
|
+ return;
|
|
|
|
|
|
- flags = clk_enable_lock();
|
|
|
+ if (WARN_ON(core->prepare_count == 0))
|
|
|
+ return;
|
|
|
|
|
|
- if (core->enable_count)
|
|
|
- goto unlock_out;
|
|
|
+ if (--core->prepare_count > 0)
|
|
|
+ return;
|
|
|
|
|
|
- if (core->flags & CLK_IGNORE_UNUSED)
|
|
|
- goto unlock_out;
|
|
|
+ WARN_ON(core->enable_count > 0);
|
|
|
|
|
|
- /*
|
|
|
- * some gate clocks have special needs during the disable-unused
|
|
|
- * sequence. call .disable_unused if available, otherwise fall
|
|
|
- * back to .disable
|
|
|
- */
|
|
|
- if (clk_core_is_enabled(core)) {
|
|
|
- trace_clk_disable(core);
|
|
|
- if (core->ops->disable_unused)
|
|
|
- core->ops->disable_unused(core->hw);
|
|
|
- else if (core->ops->disable)
|
|
|
- core->ops->disable(core->hw);
|
|
|
- trace_clk_disable_complete(core);
|
|
|
- }
|
|
|
+ trace_clk_unprepare(core);
|
|
|
|
|
|
-unlock_out:
|
|
|
- clk_enable_unlock(flags);
|
|
|
+ if (core->ops->unprepare)
|
|
|
+ core->ops->unprepare(core->hw);
|
|
|
+
|
|
|
+ trace_clk_unprepare_complete(core);
|
|
|
+ clk_core_unprepare(core->parent);
|
|
|
}
|
|
|
|
|
|
-static bool clk_ignore_unused;
|
|
|
-static int __init clk_ignore_unused_setup(char *__unused)
|
|
|
+/**
|
|
|
+ * clk_unprepare - undo preparation of a clock source
|
|
|
+ * @clk: the clk being unprepared
|
|
|
+ *
|
|
|
+ * clk_unprepare may sleep, which differentiates it from clk_disable. In a
|
|
|
+ * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
|
|
|
+ * if the operation may sleep. One example is a clk which is accessed over
|
|
|
+ * I2C. In the complex case a clk gate operation may require a fast and a slow
|
|
|
+ * part. It is this reason that clk_unprepare and clk_disable are not mutually
|
|
|
+ * exclusive. In fact clk_disable must be called before clk_unprepare.
|
|
|
+ */
|
|
|
+void clk_unprepare(struct clk *clk)
|
|
|
{
|
|
|
- clk_ignore_unused = true;
|
|
|
- return 1;
|
|
|
+ if (IS_ERR_OR_NULL(clk))
|
|
|
+ return;
|
|
|
+
|
|
|
+ clk_prepare_lock();
|
|
|
+ clk_core_unprepare(clk->core);
|
|
|
+ clk_prepare_unlock();
|
|
|
}
|
|
|
-__setup("clk_ignore_unused", clk_ignore_unused_setup);
|
|
|
+EXPORT_SYMBOL_GPL(clk_unprepare);
|
|
|
|
|
|
-static int clk_disable_unused(void)
|
|
|
+static int clk_core_prepare(struct clk_core *core)
|
|
|
{
|
|
|
- struct clk_core *core;
|
|
|
+ int ret = 0;
|
|
|
|
|
|
- if (clk_ignore_unused) {
|
|
|
- pr_warn("clk: Not disabling unused clocks\n");
|
|
|
+ if (!core)
|
|
|
return 0;
|
|
|
- }
|
|
|
|
|
|
- clk_prepare_lock();
|
|
|
+ if (core->prepare_count == 0) {
|
|
|
+ ret = clk_core_prepare(core->parent);
|
|
|
+ if (ret)
|
|
|
+ return ret;
|
|
|
|
|
|
- hlist_for_each_entry(core, &clk_root_list, child_node)
|
|
|
- clk_disable_unused_subtree(core);
|
|
|
+ trace_clk_prepare(core);
|
|
|
|
|
|
- hlist_for_each_entry(core, &clk_orphan_list, child_node)
|
|
|
- clk_disable_unused_subtree(core);
|
|
|
+ if (core->ops->prepare)
|
|
|
+ ret = core->ops->prepare(core->hw);
|
|
|
|
|
|
- hlist_for_each_entry(core, &clk_root_list, child_node)
|
|
|
- clk_unprepare_unused_subtree(core);
|
|
|
+ trace_clk_prepare_complete(core);
|
|
|
|
|
|
- hlist_for_each_entry(core, &clk_orphan_list, child_node)
|
|
|
- clk_unprepare_unused_subtree(core);
|
|
|
+ if (ret) {
|
|
|
+ clk_core_unprepare(core->parent);
|
|
|
+ return ret;
|
|
|
+ }
|
|
|
+ }
|
|
|
|
|
|
- clk_prepare_unlock();
|
|
|
+ core->prepare_count++;
|
|
|
|
|
|
return 0;
|
|
|
}
|
|
|
-late_initcall_sync(clk_disable_unused);
|
|
|
-
|
|
|
-/*** helper functions ***/
|
|
|
|
|
|
-const char *__clk_get_name(struct clk *clk)
|
|
|
+/**
|
|
|
+ * clk_prepare - prepare a clock source
|
|
|
+ * @clk: the clk being prepared
|
|
|
+ *
|
|
|
+ * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
|
|
|
+ * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
|
|
|
+ * operation may sleep. One example is a clk which is accessed over I2C. In
|
|
|
+ * the complex case a clk ungate operation may require a fast and a slow part.
|
|
|
+ * It is this reason that clk_prepare and clk_enable are not mutually
|
|
|
+ * exclusive. In fact clk_prepare must be called before clk_enable.
|
|
|
+ * Returns 0 on success, -EERROR otherwise.
|
|
|
+ */
|
|
|
+int clk_prepare(struct clk *clk)
|
|
|
{
|
|
|
- return !clk ? NULL : clk->core->name;
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(__clk_get_name);
|
|
|
+ int ret;
|
|
|
|
|
|
-struct clk_hw *__clk_get_hw(struct clk *clk)
|
|
|
-{
|
|
|
- return !clk ? NULL : clk->core->hw;
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(__clk_get_hw);
|
|
|
+ if (!clk)
|
|
|
+ return 0;
|
|
|
|
|
|
-u8 __clk_get_num_parents(struct clk *clk)
|
|
|
-{
|
|
|
- return !clk ? 0 : clk->core->num_parents;
|
|
|
+ clk_prepare_lock();
|
|
|
+ ret = clk_core_prepare(clk->core);
|
|
|
+ clk_prepare_unlock();
|
|
|
+
|
|
|
+ return ret;
|
|
|
}
|
|
|
-EXPORT_SYMBOL_GPL(__clk_get_num_parents);
|
|
|
+EXPORT_SYMBOL_GPL(clk_prepare);
|
|
|
|
|
|
-struct clk *__clk_get_parent(struct clk *clk)
|
|
|
+static void clk_core_disable(struct clk_core *core)
|
|
|
{
|
|
|
- if (!clk)
|
|
|
- return NULL;
|
|
|
+ if (!core)
|
|
|
+ return;
|
|
|
|
|
|
- /* TODO: Create a per-user clk and change callers to call clk_put */
|
|
|
- return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(__clk_get_parent);
|
|
|
+ if (WARN_ON(core->enable_count == 0))
|
|
|
+ return;
|
|
|
|
|
|
-static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
|
|
|
- u8 index)
|
|
|
-{
|
|
|
- if (!core || index >= core->num_parents)
|
|
|
- return NULL;
|
|
|
- else if (!core->parents)
|
|
|
- return clk_core_lookup(core->parent_names[index]);
|
|
|
- else if (!core->parents[index])
|
|
|
- return core->parents[index] =
|
|
|
- clk_core_lookup(core->parent_names[index]);
|
|
|
- else
|
|
|
- return core->parents[index];
|
|
|
-}
|
|
|
+ if (--core->enable_count > 0)
|
|
|
+ return;
|
|
|
|
|
|
-struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
|
|
|
-{
|
|
|
- struct clk_core *parent;
|
|
|
+ trace_clk_disable(core);
|
|
|
|
|
|
- if (!clk)
|
|
|
- return NULL;
|
|
|
+ if (core->ops->disable)
|
|
|
+ core->ops->disable(core->hw);
|
|
|
|
|
|
- parent = clk_core_get_parent_by_index(clk->core, index);
|
|
|
+ trace_clk_disable_complete(core);
|
|
|
|
|
|
- return !parent ? NULL : parent->hw->clk;
|
|
|
+ clk_core_disable(core->parent);
|
|
|
}
|
|
|
-EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
|
|
|
|
|
|
-unsigned int __clk_get_enable_count(struct clk *clk)
|
|
|
+/**
|
|
|
+ * clk_disable - gate a clock
|
|
|
+ * @clk: the clk being gated
|
|
|
+ *
|
|
|
+ * clk_disable must not sleep, which differentiates it from clk_unprepare. In
|
|
|
+ * a simple case, clk_disable can be used instead of clk_unprepare to gate a
|
|
|
+ * clk if the operation is fast and will never sleep. One example is a
|
|
|
+ * SoC-internal clk which is controlled via simple register writes. In the
|
|
|
+ * complex case a clk gate operation may require a fast and a slow part. It is
|
|
|
+ * this reason that clk_unprepare and clk_disable are not mutually exclusive.
|
|
|
+ * In fact clk_disable must be called before clk_unprepare.
|
|
|
+ */
|
|
|
+void clk_disable(struct clk *clk)
|
|
|
{
|
|
|
- return !clk ? 0 : clk->core->enable_count;
|
|
|
+ unsigned long flags;
|
|
|
+
|
|
|
+ if (IS_ERR_OR_NULL(clk))
|
|
|
+ return;
|
|
|
+
|
|
|
+ flags = clk_enable_lock();
|
|
|
+ clk_core_disable(clk->core);
|
|
|
+ clk_enable_unlock(flags);
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(clk_disable);
|
|
|
|
|
|
-static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
|
|
|
+static int clk_core_enable(struct clk_core *core)
|
|
|
{
|
|
|
- unsigned long ret;
|
|
|
+ int ret = 0;
|
|
|
|
|
|
- if (!core) {
|
|
|
- ret = 0;
|
|
|
- goto out;
|
|
|
- }
|
|
|
+ if (!core)
|
|
|
+ return 0;
|
|
|
|
|
|
- ret = core->rate;
|
|
|
+ if (WARN_ON(core->prepare_count == 0))
|
|
|
+ return -ESHUTDOWN;
|
|
|
|
|
|
- if (core->flags & CLK_IS_ROOT)
|
|
|
- goto out;
|
|
|
+ if (core->enable_count == 0) {
|
|
|
+ ret = clk_core_enable(core->parent);
|
|
|
|
|
|
- if (!core->parent)
|
|
|
- ret = 0;
|
|
|
+ if (ret)
|
|
|
+ return ret;
|
|
|
|
|
|
-out:
|
|
|
- return ret;
|
|
|
-}
|
|
|
+ trace_clk_enable(core);
|
|
|
|
|
|
-unsigned long __clk_get_rate(struct clk *clk)
|
|
|
-{
|
|
|
- if (!clk)
|
|
|
- return 0;
|
|
|
+ if (core->ops->enable)
|
|
|
+ ret = core->ops->enable(core->hw);
|
|
|
|
|
|
- return clk_core_get_rate_nolock(clk->core);
|
|
|
+ trace_clk_enable_complete(core);
|
|
|
+
|
|
|
+ if (ret) {
|
|
|
+ clk_core_disable(core->parent);
|
|
|
+ return ret;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ core->enable_count++;
|
|
|
+ return 0;
|
|
|
}
|
|
|
-EXPORT_SYMBOL_GPL(__clk_get_rate);
|
|
|
|
|
|
-static unsigned long __clk_get_accuracy(struct clk_core *core)
|
|
|
+/**
|
|
|
+ * clk_enable - ungate a clock
|
|
|
+ * @clk: the clk being ungated
|
|
|
+ *
|
|
|
+ * clk_enable must not sleep, which differentiates it from clk_prepare. In a
|
|
|
+ * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
|
|
|
+ * if the operation will never sleep. One example is a SoC-internal clk which
|
|
|
+ * is controlled via simple register writes. In the complex case a clk ungate
|
|
|
+ * operation may require a fast and a slow part. It is this reason that
|
|
|
+ * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
|
|
|
+ * must be called before clk_enable. Returns 0 on success, -EERROR
|
|
|
+ * otherwise.
|
|
|
+ */
|
|
|
+int clk_enable(struct clk *clk)
|
|
|
{
|
|
|
- if (!core)
|
|
|
+ unsigned long flags;
|
|
|
+ int ret;
|
|
|
+
|
|
|
+ if (!clk)
|
|
|
return 0;
|
|
|
|
|
|
- return core->accuracy;
|
|
|
-}
|
|
|
+ flags = clk_enable_lock();
|
|
|
+ ret = clk_core_enable(clk->core);
|
|
|
+ clk_enable_unlock(flags);
|
|
|
|
|
|
-unsigned long __clk_get_flags(struct clk *clk)
|
|
|
-{
|
|
|
- return !clk ? 0 : clk->core->flags;
|
|
|
+ return ret;
|
|
|
}
|
|
|
-EXPORT_SYMBOL_GPL(__clk_get_flags);
|
|
|
+EXPORT_SYMBOL_GPL(clk_enable);
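
Taken together with clk_prepare()/clk_unprepare() above, the required consumer-side ordering is prepare, enable, disable, unprepare; a minimal sketch with a hypothetical helper name:

static int foo_clk_on(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* may sleep; call from sleepable context */
	if (ret)
		return ret;

	ret = clk_enable(clk);		/* atomic-safe; only valid after prepare */
	if (ret)
		clk_unprepare(clk);	/* unwind on failure */

	return ret;
}
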
-static bool clk_core_is_prepared(struct clk_core *core)
|
|
|
+static unsigned long clk_core_round_rate_nolock(struct clk_core *core,
|
|
|
+ unsigned long rate,
|
|
|
+ unsigned long min_rate,
|
|
|
+ unsigned long max_rate)
|
|
|
{
|
|
|
- int ret;
|
|
|
+ unsigned long parent_rate = 0;
|
|
|
+ struct clk_core *parent;
|
|
|
+ struct clk_hw *parent_hw;
|
|
|
+
|
|
|
+ lockdep_assert_held(&prepare_lock);
|
|
|
|
|
|
if (!core)
|
|
|
- return false;
|
|
|
+ return 0;
|
|
|
|
|
|
- /*
|
|
|
- * .is_prepared is optional for clocks that can prepare
|
|
|
- * fall back to software usage counter if it is missing
|
|
|
- */
|
|
|
- if (!core->ops->is_prepared) {
|
|
|
- ret = core->prepare_count ? 1 : 0;
|
|
|
- goto out;
|
|
|
- }
|
|
|
+ parent = core->parent;
|
|
|
+ if (parent)
|
|
|
+ parent_rate = parent->rate;
|
|
|
|
|
|
- ret = core->ops->is_prepared(core->hw);
|
|
|
-out:
|
|
|
- return !!ret;
|
|
|
+ if (core->ops->determine_rate) {
|
|
|
+ parent_hw = parent ? parent->hw : NULL;
|
|
|
+ return core->ops->determine_rate(core->hw, rate,
|
|
|
+ min_rate, max_rate,
|
|
|
+ &parent_rate, &parent_hw);
|
|
|
+ } else if (core->ops->round_rate)
|
|
|
+ return core->ops->round_rate(core->hw, rate, &parent_rate);
|
|
|
+ else if (core->flags & CLK_SET_RATE_PARENT)
|
|
|
+ return clk_core_round_rate_nolock(core->parent, rate, min_rate,
|
|
|
+ max_rate);
|
|
|
+ else
|
|
|
+ return core->rate;
|
|
|
}
|
|
|
|
|
|
-bool __clk_is_prepared(struct clk *clk)
|
|
|
+/**
|
|
|
+ * __clk_determine_rate - get the closest rate actually supported by a clock
|
|
|
+ * @hw: determine the rate of this clock
|
|
|
+ * @rate: target rate
|
|
|
+ * @min_rate: returned rate must be greater than this rate
|
|
|
+ * @max_rate: returned rate must be less than this rate
|
|
|
+ *
|
|
|
+ * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate and
|
|
|
+ * .determine_rate.
|
|
|
+ */
|
|
|
+unsigned long __clk_determine_rate(struct clk_hw *hw,
|
|
|
+ unsigned long rate,
|
|
|
+ unsigned long min_rate,
|
|
|
+ unsigned long max_rate)
|
|
|
{
|
|
|
- if (!clk)
|
|
|
- return false;
|
|
|
+ if (!hw)
|
|
|
+ return 0;
|
|
|
|
|
|
- return clk_core_is_prepared(clk->core);
|
|
|
+ return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate);
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(__clk_determine_rate);
|
|
|
|
|
|
-static bool clk_core_is_enabled(struct clk_core *core)
|
|
|
+/**
|
|
|
+ * __clk_round_rate - round the given rate for a clk
|
|
|
+ * @clk: round the rate of this clock
|
|
|
+ * @rate: the rate which is to be rounded
|
|
|
+ *
|
|
|
+ * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
|
|
|
+ */
|
|
|
+unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
|
|
|
{
|
|
|
- int ret;
|
|
|
+ unsigned long min_rate;
|
|
|
+ unsigned long max_rate;
|
|
|
|
|
|
- if (!core)
|
|
|
- return false;
|
|
|
+ if (!clk)
|
|
|
+ return 0;
|
|
|
|
|
|
- /*
|
|
|
- * .is_enabled is only mandatory for clocks that gate
|
|
|
- * fall back to software usage counter if .is_enabled is missing
|
|
|
- */
|
|
|
- if (!core->ops->is_enabled) {
|
|
|
- ret = core->enable_count ? 1 : 0;
|
|
|
- goto out;
|
|
|
- }
|
|
|
+ clk_core_get_boundaries(clk->core, &min_rate, &max_rate);
|
|
|
|
|
|
- ret = core->ops->is_enabled(core->hw);
|
|
|
-out:
|
|
|
- return !!ret;
|
|
|
+ return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate);
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(__clk_round_rate);
|
|
|
|
|
|
-bool __clk_is_enabled(struct clk *clk)
|
|
|
+/**
|
|
|
+ * clk_round_rate - round the given rate for a clk
|
|
|
+ * @clk: the clk for which we are rounding a rate
|
|
|
+ * @rate: the rate which is to be rounded
|
|
|
+ *
|
|
|
+ * Takes in a rate as input and rounds it to a rate that the clk can actually
|
|
|
+ * use which is then returned. If clk doesn't support round_rate operation
|
|
|
+ * then the parent rate is returned.
|
|
|
+ */
|
|
|
+long clk_round_rate(struct clk *clk, unsigned long rate)
|
|
|
{
|
|
|
+ unsigned long ret;
|
|
|
+
|
|
|
if (!clk)
|
|
|
- return false;
|
|
|
+ return 0;
|
|
|
|
|
|
- return clk_core_is_enabled(clk->core);
|
|
|
+ clk_prepare_lock();
|
|
|
+ ret = __clk_round_rate(clk, rate);
|
|
|
+ clk_prepare_unlock();
|
|
|
+
|
|
|
+ return ret;
|
|
|
}
|
|
|
-EXPORT_SYMBOL_GPL(__clk_is_enabled);
|
|
|
+EXPORT_SYMBOL_GPL(clk_round_rate);
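
A typical consumer sketch built on the call above (the helper name is hypothetical; clk_set_rate() is the standard companion API): query the achievable rate first, then commit to it:

static int foo_pick_rate(struct clk *clk, unsigned long target)
{
	long rounded = clk_round_rate(clk, target);

	if (rounded <= 0)
		return -EINVAL;	/* nothing usable near the target */

	return clk_set_rate(clk, rounded);
}
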
-static struct clk_core *__clk_lookup_subtree(const char *name,
|
|
|
- struct clk_core *core)
|
|
|
+/**
|
|
|
+ * __clk_notify - call clk notifier chain
|
|
|
+ * @core: clk that is changing rate
|
|
|
+ * @msg: clk notifier type (see include/linux/clk.h)
|
|
|
+ * @old_rate: old clk rate
|
|
|
+ * @new_rate: new clk rate
|
|
|
+ *
|
|
|
+ * Triggers a notifier call chain on the clk rate-change notification
|
|
|
+ * for 'clk'. Passes a pointer to the struct clk and the previous
|
|
|
+ * and current rates to the notifier callback. Intended to be called by
|
|
|
+ * internal clock code only. Returns NOTIFY_DONE from the last driver
|
|
|
+ * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
|
|
|
+ * a driver returns that.
|
|
|
+ */
|
|
|
+static int __clk_notify(struct clk_core *core, unsigned long msg,
|
|
|
+ unsigned long old_rate, unsigned long new_rate)
|
|
|
{
|
|
|
- struct clk_core *child;
|
|
|
- struct clk_core *ret;
|
|
|
+ struct clk_notifier *cn;
|
|
|
+ struct clk_notifier_data cnd;
|
|
|
+ int ret = NOTIFY_DONE;
|
|
|
|
|
|
- if (!strcmp(core->name, name))
|
|
|
- return core;
|
|
|
+ cnd.old_rate = old_rate;
|
|
|
+ cnd.new_rate = new_rate;
|
|
|
|
|
|
- hlist_for_each_entry(child, &core->children, child_node) {
|
|
|
- ret = __clk_lookup_subtree(name, child);
|
|
|
- if (ret)
|
|
|
- return ret;
|
|
|
+ list_for_each_entry(cn, &clk_notifier_list, node) {
|
|
|
+ if (cn->clk->core == core) {
|
|
|
+ cnd.clk = cn->clk;
|
|
|
+ ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
|
|
|
+ &cnd);
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
- return NULL;
|
|
|
+ return ret;
|
|
|
}
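
On the receiving end, a consumer subscribes with clk_notifier_register() and is handed the clk_notifier_data built above; a sketch with a hypothetical 100 MHz policy:

static int foo_clk_notify(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	struct clk_notifier_data *cnd = data;

	/* returning NOTIFY_BAD from PRE_RATE_CHANGE aborts the change */
	if (action == PRE_RATE_CHANGE && cnd->new_rate > 100000000)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}
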
-static struct clk_core *clk_core_lookup(const char *name)
|
|
|
+/**
|
|
|
+ * __clk_recalc_accuracies
|
|
|
+ * @core: first clk in the subtree
|
|
|
+ *
|
|
|
+ * Walks the subtree of clks starting with clk and recalculates accuracies as
|
|
|
+ * it goes. Note that if a clk does not implement the .recalc_accuracy
|
|
|
+ * callback then it is assumed that the clock will take on the accuracy of its
|
|
|
+ * parent.
|
|
|
+ *
|
|
|
+ * Caller must hold prepare_lock.
|
|
|
+ */
|
|
|
+static void __clk_recalc_accuracies(struct clk_core *core)
|
|
|
{
|
|
|
- struct clk_core *root_clk;
|
|
|
- struct clk_core *ret;
|
|
|
+ unsigned long parent_accuracy = 0;
|
|
|
+ struct clk_core *child;
|
|
|
|
|
|
- if (!name)
|
|
|
- return NULL;
|
|
|
+ lockdep_assert_held(&prepare_lock);
|
|
|
|
|
|
- /* search the 'proper' clk tree first */
|
|
|
- hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
|
|
|
- ret = __clk_lookup_subtree(name, root_clk);
|
|
|
- if (ret)
|
|
|
- return ret;
|
|
|
- }
|
|
|
+ if (core->parent)
|
|
|
+ parent_accuracy = core->parent->accuracy;
|
|
|
|
|
|
- /* if not found, then search the orphan tree */
|
|
|
- hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
|
|
|
- ret = __clk_lookup_subtree(name, root_clk);
|
|
|
- if (ret)
|
|
|
- return ret;
|
|
|
- }
|
|
|
+ if (core->ops->recalc_accuracy)
|
|
|
+ core->accuracy = core->ops->recalc_accuracy(core->hw,
|
|
|
+ parent_accuracy);
|
|
|
+ else
|
|
|
+ core->accuracy = parent_accuracy;
|
|
|
|
|
|
- return NULL;
|
|
|
+ hlist_for_each_entry(child, &core->children, child_node)
|
|
|
+ __clk_recalc_accuracies(child);
|
|
|
}
|
|
|
|
|
|
-static bool mux_is_better_rate(unsigned long rate, unsigned long now,
|
|
|
- unsigned long best, unsigned long flags)
|
|
|
+static long clk_core_get_accuracy(struct clk_core *core)
|
|
|
{
|
|
|
- if (flags & CLK_MUX_ROUND_CLOSEST)
|
|
|
- return abs(now - rate) < abs(best - rate);
|
|
|
+ unsigned long accuracy;
|
|
|
|
|
|
- return now <= rate && now > best;
|
|
|
-}
|
|
|
+ clk_prepare_lock();
|
|
|
+ if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
|
|
|
+ __clk_recalc_accuracies(core);
|
|
|
|
|
|
-static long
|
|
|
-clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate,
|
|
|
- unsigned long min_rate,
|
|
|
- unsigned long max_rate,
|
|
|
- unsigned long *best_parent_rate,
|
|
|
- struct clk_hw **best_parent_p,
|
|
|
- unsigned long flags)
|
|
|
-{
|
|
|
- struct clk_core *core = hw->core, *parent, *best_parent = NULL;
|
|
|
- int i, num_parents;
|
|
|
- unsigned long parent_rate, best = 0;
|
|
|
-
|
|
|
- /* if NO_REPARENT flag set, pass through to current parent */
|
|
|
- if (core->flags & CLK_SET_RATE_NO_REPARENT) {
|
|
|
- parent = core->parent;
|
|
|
- if (core->flags & CLK_SET_RATE_PARENT)
|
|
|
- best = __clk_determine_rate(parent ? parent->hw : NULL,
|
|
|
- rate, min_rate, max_rate);
|
|
|
- else if (parent)
|
|
|
- best = clk_core_get_rate_nolock(parent);
|
|
|
- else
|
|
|
- best = clk_core_get_rate_nolock(core);
|
|
|
- goto out;
|
|
|
- }
|
|
|
-
|
|
|
- /* find the parent that can provide the fastest rate <= rate */
|
|
|
- num_parents = core->num_parents;
|
|
|
- for (i = 0; i < num_parents; i++) {
|
|
|
- parent = clk_core_get_parent_by_index(core, i);
|
|
|
- if (!parent)
|
|
|
- continue;
|
|
|
- if (core->flags & CLK_SET_RATE_PARENT)
|
|
|
- parent_rate = __clk_determine_rate(parent->hw, rate,
|
|
|
- min_rate,
|
|
|
- max_rate);
|
|
|
- else
|
|
|
- parent_rate = clk_core_get_rate_nolock(parent);
|
|
|
- if (mux_is_better_rate(rate, parent_rate, best, flags)) {
|
|
|
- best_parent = parent;
|
|
|
- best = parent_rate;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
-out:
|
|
|
- if (best_parent)
|
|
|
- *best_parent_p = best_parent->hw;
|
|
|
- *best_parent_rate = best;
|
|
|
+ accuracy = __clk_get_accuracy(core);
|
|
|
+ clk_prepare_unlock();
|
|
|
|
|
|
- return best;
|
|
|
+ return accuracy;
|
|
|
}
|
|
|
|
|
|
-struct clk *__clk_lookup(const char *name)
|
|
|
+/**
|
|
|
+ * clk_get_accuracy - return the accuracy of clk
|
|
|
+ * @clk: the clk whose accuracy is being returned
|
|
|
+ *
|
|
|
+ * Simply returns the cached accuracy of the clk, unless
|
|
|
+ * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will be
|
|
|
+ * issued.
|
|
|
+ * If clk is NULL then returns 0.
|
|
|
+ */
|
|
|
+long clk_get_accuracy(struct clk *clk)
|
|
|
{
|
|
|
- struct clk_core *core = clk_core_lookup(name);
|
|
|
+ if (!clk)
|
|
|
+ return 0;
|
|
|
|
|
|
- return !core ? NULL : core->hw->clk;
|
|
|
+ return clk_core_get_accuracy(clk->core);
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(clk_get_accuracy);
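
On the provider side, accuracy enters the tree through .recalc_accuracy (or is inherited verbatim, per __clk_recalc_accuracies above); a sketch for a hypothetical jittery divider, assuming accuracy is expressed in ppb:

static unsigned long foo_recalc_accuracy(struct clk_hw *hw,
					 unsigned long parent_accuracy)
{
	/* hypothetical: divider adds a fixed 50 ppb on top of its parent */
	return parent_accuracy + 50;
}
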
-static void clk_core_get_boundaries(struct clk_core *core,
|
|
|
- unsigned long *min_rate,
|
|
|
- unsigned long *max_rate)
|
|
|
+static unsigned long clk_recalc(struct clk_core *core,
|
|
|
+ unsigned long parent_rate)
|
|
|
{
|
|
|
- struct clk *clk_user;
|
|
|
-
|
|
|
- *min_rate = 0;
|
|
|
- *max_rate = ULONG_MAX;
|
|
|
-
|
|
|
- hlist_for_each_entry(clk_user, &core->clks, clks_node)
|
|
|
- *min_rate = max(*min_rate, clk_user->min_rate);
|
|
|
-
|
|
|
- hlist_for_each_entry(clk_user, &core->clks, clks_node)
|
|
|
- *max_rate = min(*max_rate, clk_user->max_rate);
|
|
|
+ if (core->ops->recalc_rate)
|
|
|
+ return core->ops->recalc_rate(core->hw, parent_rate);
|
|
|
+ return parent_rate;
|
|
|
}
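
The .recalc_rate hook consulted by clk_recalc() above has this shape; a sketch for a hypothetical fixed divide-by-4 clock:

static unsigned long foo_div4_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	return parent_rate / 4;
}
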
-/*
|
|
|
- * Helper for finding best parent to provide a given frequency. This can be used
|
|
|
- * directly as a determine_rate callback (e.g. for a mux), or from a more
|
|
|
- * complex clock that may combine a mux with other operations.
|
|
|
+/**
|
|
|
+ * __clk_recalc_rates
|
|
|
+ * @core: first clk in the subtree
|
|
|
+ * @msg: notification type (see include/linux/clk.h)
|
|
|
+ *
|
|
|
+ * Walks the subtree of clks starting with clk and recalculates rates as it
|
|
|
+ * goes. Note that if a clk does not implement the .recalc_rate callback then
|
|
|
+ * it is assumed that the clock will take on the rate of its parent.
|
|
|
+ *
|
|
|
+ * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
|
|
|
+ * if necessary.
|
|
|
+ *
|
|
|
+ * Caller must hold prepare_lock.
|
|
|
*/
|
|
|
-long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
|
|
|
- unsigned long min_rate,
|
|
|
- unsigned long max_rate,
|
|
|
- unsigned long *best_parent_rate,
|
|
|
- struct clk_hw **best_parent_p)
|
|
|
+static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
|
|
|
{
|
|
|
- return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
|
|
|
- best_parent_rate,
|
|
|
- best_parent_p, 0);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
|
|
|
+ unsigned long old_rate;
|
|
|
+ unsigned long parent_rate = 0;
|
|
|
+ struct clk_core *child;
|
|
|
|
|
|
-long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
|
|
|
- unsigned long min_rate,
|
|
|
- unsigned long max_rate,
|
|
|
- unsigned long *best_parent_rate,
|
|
|
- struct clk_hw **best_parent_p)
|
|
|
-{
|
|
|
- return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
|
|
|
- best_parent_rate,
|
|
|
- best_parent_p,
|
|
|
- CLK_MUX_ROUND_CLOSEST);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
|
|
|
+ lockdep_assert_held(&prepare_lock);
|
|
|
|
|
|
-/*** clk api ***/
|
|
|
+ old_rate = core->rate;
|
|
|
|
|
|
-static void clk_core_unprepare(struct clk_core *core)
|
|
|
-{
|
|
|
- if (!core)
|
|
|
- return;
|
|
|
+ if (core->parent)
|
|
|
+ parent_rate = core->parent->rate;
|
|
|
|
|
|
- if (WARN_ON(core->prepare_count == 0))
|
|
|
- return;
|
|
|
+ core->rate = clk_recalc(core, parent_rate);
|
|
|
|
|
|
- if (--core->prepare_count > 0)
|
|
|
- return;
|
|
|
+ /*
|
|
|
+ * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
|
|
|
+ * & ABORT_RATE_CHANGE notifiers
|
|
|
+ */
|
|
|
+ if (core->notifier_count && msg)
|
|
|
+ __clk_notify(core, msg, old_rate, core->rate);
|
|
|
|
|
|
- WARN_ON(core->enable_count > 0);
|
|
|
+ hlist_for_each_entry(child, &core->children, child_node)
|
|
|
+ __clk_recalc_rates(child, msg);
|
|
|
+}
|
|
|
|
|
|
- trace_clk_unprepare(core);
|
|
|
+static unsigned long clk_core_get_rate(struct clk_core *core)
|
|
|
+{
|
|
|
+ unsigned long rate;
|
|
|
|
|
|
- if (core->ops->unprepare)
|
|
|
- core->ops->unprepare(core->hw);
|
|
|
+ clk_prepare_lock();
|
|
|
|
|
|
- trace_clk_unprepare_complete(core);
|
|
|
- clk_core_unprepare(core->parent);
|
|
|
+ if (core && (core->flags & CLK_GET_RATE_NOCACHE))
|
|
|
+ __clk_recalc_rates(core, 0);
|
|
|
+
|
|
|
+ rate = clk_core_get_rate_nolock(core);
|
|
|
+ clk_prepare_unlock();
|
|
|
+
|
|
|
+ return rate;
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * clk_unprepare - undo preparation of a clock source
|
|
|
- * @clk: the clk being unprepared
|
|
|
+ * clk_get_rate - return the rate of clk
|
|
|
+ * @clk: the clk whose rate is being returned
|
|
|
*
|
|
|
- * clk_unprepare may sleep, which differentiates it from clk_disable. In a
|
|
|
- * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
|
|
|
- * if the operation may sleep. One example is a clk which is accessed over
|
|
|
- * I2c. In the complex case a clk gate operation may require a fast and a slow
|
|
|
- * part. It is this reason that clk_unprepare and clk_disable are not mutually
|
|
|
- * exclusive. In fact clk_disable must be called before clk_unprepare.
|
|
|
+ * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
|
|
|
+ * is set, which means a recalc_rate will be issued.
|
|
|
+ * If clk is NULL then returns 0.
|
|
|
*/
|
|
|
-void clk_unprepare(struct clk *clk)
|
|
|
+unsigned long clk_get_rate(struct clk *clk)
|
|
|
{
|
|
|
- if (IS_ERR_OR_NULL(clk))
|
|
|
- return;
|
|
|
+ if (!clk)
|
|
|
+ return 0;
|
|
|
|
|
|
- clk_prepare_lock();
|
|
|
- clk_core_unprepare(clk->core);
|
|
|
- clk_prepare_unlock();
|
|
|
+ return clk_core_get_rate(clk->core);
|
|
|
}
|
|
|
-EXPORT_SYMBOL_GPL(clk_unprepare);
|
|
|
+EXPORT_SYMBOL_GPL(clk_get_rate);
|
|
|
|
|
|
-static int clk_core_prepare(struct clk_core *core)
|
|
|
+static int clk_fetch_parent_index(struct clk_core *core,
|
|
|
+ struct clk_core *parent)
|
|
|
{
|
|
|
- int ret = 0;
|
|
|
-
|
|
|
- if (!core)
|
|
|
- return 0;
|
|
|
-
|
|
|
- if (core->prepare_count == 0) {
|
|
|
- ret = clk_core_prepare(core->parent);
|
|
|
- if (ret)
|
|
|
- return ret;
|
|
|
+ int i;
|
|
|
|
|
|
- trace_clk_prepare(core);
|
|
|
+ if (!core->parents) {
|
|
|
+ core->parents = kcalloc(core->num_parents,
|
|
|
+ sizeof(struct clk *), GFP_KERNEL);
|
|
|
+ if (!core->parents)
|
|
|
+ return -ENOMEM;
|
|
|
+ }
|
|
|
|
|
|
- if (core->ops->prepare)
|
|
|
- ret = core->ops->prepare(core->hw);
|
|
|
+ /*
|
|
|
+ * find index of new parent clock using cached parent ptrs,
|
|
|
+ * or if not yet cached, use string name comparison and cache
|
|
|
+ * them now to avoid future calls to clk_core_lookup.
|
|
|
+ */
|
|
|
+ for (i = 0; i < core->num_parents; i++) {
|
|
|
+ if (core->parents[i] == parent)
|
|
|
+ return i;
|
|
|
|
|
|
- trace_clk_prepare_complete(core);
|
|
|
+ if (core->parents[i])
|
|
|
+ continue;
|
|
|
|
|
|
- if (ret) {
|
|
|
- clk_core_unprepare(core->parent);
|
|
|
- return ret;
|
|
|
+ if (!strcmp(core->parent_names[i], parent->name)) {
|
|
|
+ core->parents[i] = clk_core_lookup(parent->name);
|
|
|
+ return i;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- core->prepare_count++;
|
|
|
-
|
|
|
- return 0;
|
|
|
+ return -EINVAL;
|
|
|
}
|
|
|
|
|
|
-/**
|
|
|
- * clk_prepare - prepare a clock source
|
|
|
- * @clk: the clk being prepared
|
|
|
- *
|
|
|
- * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
|
|
|
- * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
|
|
|
- * operation may sleep. One example is a clk which is accessed over I2c. In
|
|
|
- * the complex case a clk ungate operation may require a fast and a slow part.
|
|
|
- * It is this reason that clk_prepare and clk_enable are not mutually
|
|
|
- * exclusive. In fact clk_prepare must be called before clk_enable.
|
|
|
- * Returns 0 on success, -EERROR otherwise.
|
|
|
- */
|
|
|
-int clk_prepare(struct clk *clk)
|
|
|
+static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
|
|
|
{
|
|
|
- int ret;
|
|
|
-
|
|
|
- if (!clk)
|
|
|
- return 0;
|
|
|
+ hlist_del(&core->child_node);
|
|
|
|
|
|
- clk_prepare_lock();
|
|
|
- ret = clk_core_prepare(clk->core);
|
|
|
- clk_prepare_unlock();
|
|
|
-
|
|
|
- return ret;
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(clk_prepare);
|
|
|
-
|
|
|
-static void clk_core_disable(struct clk_core *core)
|
|
|
-{
|
|
|
- if (!core)
|
|
|
- return;
|
|
|
-
|
|
|
- if (WARN_ON(core->enable_count == 0))
|
|
|
- return;
|
|
|
-
|
|
|
- if (--core->enable_count > 0)
|
|
|
- return;
|
|
|
-
|
|
|
- trace_clk_disable(core);
|
|
|
-
|
|
|
- if (core->ops->disable)
|
|
|
- core->ops->disable(core->hw);
|
|
|
+ if (new_parent) {
|
|
|
+ /* avoid duplicate POST_RATE_CHANGE notifications */
|
|
|
+ if (new_parent->new_child == core)
|
|
|
+ new_parent->new_child = NULL;
|
|
|
|
|
|
- trace_clk_disable_complete(core);
|
|
|
+ hlist_add_head(&core->child_node, &new_parent->children);
|
|
|
+ } else {
|
|
|
+ hlist_add_head(&core->child_node, &clk_orphan_list);
|
|
|
+ }
|
|
|
|
|
|
- clk_core_disable(core->parent);
|
|
|
+ core->parent = new_parent;
|
|
|
}
|
|
|
|
|
|
-/**
|
|
|
- * clk_disable - gate a clock
|
|
|
- * @clk: the clk being gated
|
|
|
- *
|
|
|
- * clk_disable must not sleep, which differentiates it from clk_unprepare. In
|
|
|
- * a simple case, clk_disable can be used instead of clk_unprepare to gate a
|
|
|
- * clk if the operation is fast and will never sleep. One example is a
|
|
|
- * SoC-internal clk which is controlled via simple register writes. In the
|
|
|
- * complex case a clk gate operation may require a fast and a slow part. It is
|
|
|
- * this reason that clk_unprepare and clk_disable are not mutually exclusive.
|
|
|
- * In fact clk_disable must be called before clk_unprepare.
|
|
|
- */
|
|
|
-void clk_disable(struct clk *clk)
|
|
|
+static struct clk_core *__clk_set_parent_before(struct clk_core *core,
|
|
|
+ struct clk_core *parent)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
+ struct clk_core *old_parent = core->parent;
|
|
|
|
|
|
- if (IS_ERR_OR_NULL(clk))
|
|
|
- return;
|
|
|
+ /*
|
|
|
+ * Migrate prepare state between parents and prevent race with
|
|
|
+ * clk_enable().
|
|
|
+ *
|
|
|
+ * If the clock is not prepared, then a race with
|
|
|
+ * clk_enable/disable() is impossible since we already have the
|
|
|
+ * prepare lock (future calls to clk_enable() need to be preceded by
|
|
|
+ * a clk_prepare()).
|
|
|
+ *
|
|
|
+ * If the clock is prepared, migrate the prepared state to the new
|
|
|
+ * parent and also protect against a race with clk_enable() by
|
|
|
+ * forcing the clock and the new parent on. This ensures that all
|
|
|
+ * future calls to clk_enable() are practically NOPs with respect to
|
|
|
+ * hardware and software states.
|
|
|
+ *
|
|
|
+ * See also: Comment for clk_set_parent() below.
|
|
|
+ */
|
|
|
+ if (core->prepare_count) {
|
|
|
+ clk_core_prepare(parent);
|
|
|
+ clk_core_enable(parent);
|
|
|
+ clk_core_enable(core);
|
|
|
+ }
|
|
|
|
|
|
+ /* update the clk tree topology */
|
|
|
flags = clk_enable_lock();
|
|
|
- clk_core_disable(clk->core);
|
|
|
+ clk_reparent(core, parent);
|
|
|
clk_enable_unlock(flags);
|
|
|
+
|
|
|
+ return old_parent;
|
|
|
}
|
|
|
-EXPORT_SYMBOL_GPL(clk_disable);
|
|
|
|
|
|
-static int clk_core_enable(struct clk_core *core)
|
|
|
+static void __clk_set_parent_after(struct clk_core *core,
|
|
|
+ struct clk_core *parent,
|
|
|
+ struct clk_core *old_parent)
|
|
|
{
|
|
|
- int ret = 0;
|
|
|
-
|
|
|
- if (!core)
|
|
|
- return 0;
|
|
|
+ /*
|
|
|
+ * Finish the migration of prepare state and undo the changes done
|
|
|
+ * for preventing a race with clk_enable().
|
|
|
+ */
|
|
|
+ if (core->prepare_count) {
|
|
|
+ clk_core_disable(core);
|
|
|
+ clk_core_disable(old_parent);
|
|
|
+ clk_core_unprepare(old_parent);
|
|
|
+ }
|
|
|
+}
|
|
|
|
|
|
- if (WARN_ON(core->prepare_count == 0))
|
|
|
- return -ESHUTDOWN;
|
|
|
+static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
|
|
|
+ u8 p_index)
|
|
|
+{
|
|
|
+ unsigned long flags;
|
|
|
+ int ret = 0;
|
|
|
+ struct clk_core *old_parent;
|
|
|
|
|
|
- if (core->enable_count == 0) {
|
|
|
- ret = clk_core_enable(core->parent);
|
|
|
+ old_parent = __clk_set_parent_before(core, parent);
|
|
|
|
|
|
- if (ret)
|
|
|
- return ret;
|
|
|
+ trace_clk_set_parent(core, parent);
|
|
|
|
|
|
- trace_clk_enable(core);
|
|
|
+ /* change clock input source */
|
|
|
+ if (parent && core->ops->set_parent)
|
|
|
+ ret = core->ops->set_parent(core->hw, p_index);
|
|
|
|
|
|
- if (core->ops->enable)
|
|
|
- ret = core->ops->enable(core->hw);
|
|
|
+ trace_clk_set_parent_complete(core, parent);
|
|
|
|
|
|
- trace_clk_enable_complete(core);
|
|
|
+ if (ret) {
|
|
|
+ flags = clk_enable_lock();
|
|
|
+ clk_reparent(core, old_parent);
|
|
|
+ clk_enable_unlock(flags);
|
|
|
|
|
|
- if (ret) {
|
|
|
- clk_core_disable(core->parent);
|
|
|
- return ret;
|
|
|
+ if (core->prepare_count) {
|
|
|
+ clk_core_disable(core);
|
|
|
+ clk_core_disable(parent);
|
|
|
+ clk_core_unprepare(parent);
|
|
|
}
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
- core->enable_count++;
|
|
|
+ __clk_set_parent_after(core, parent, old_parent);
|
|
|
+
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * clk_enable - ungate a clock
|
|
|
- * @clk: the clk being ungated
|
|
|
+ * __clk_speculate_rates
|
|
|
+ * @core: first clk in the subtree
|
|
|
+ * @parent_rate: the "future" rate of clk's parent
|
|
|
*
|
|
|
- * clk_enable must not sleep, which differentiates it from clk_prepare. In a
|
|
|
- * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
|
|
|
- * if the operation will never sleep. One example is a SoC-internal clk which
|
|
|
- * is controlled via simple register writes. In the complex case a clk ungate
|
|
|
- * operation may require a fast and a slow part. It is this reason that
|
|
|
- * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
|
|
|
- * must be called before clk_enable. Returns 0 on success, -EERROR
|
|
|
- * otherwise.
|
|
|
+ * Walks the subtree of clks starting with clk, speculating rates as it
|
|
|
+ * goes and firing off PRE_RATE_CHANGE notifications as necessary.
|
|
|
+ *
|
|
|
+ * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
|
|
|
+ * pre-rate change notifications and returns early if no clks in the
|
|
|
+ * subtree have subscribed to the notifications. Note that if a clk does not
|
|
|
+ * implement the .recalc_rate callback then it is assumed that the clock will
|
|
|
+ * take on the rate of its parent.
|
|
|
+ *
|
|
|
+ * Caller must hold prepare_lock.
|
|
|
*/
|
|
|
-int clk_enable(struct clk *clk)
|
|
|
+static int __clk_speculate_rates(struct clk_core *core,
|
|
|
+ unsigned long parent_rate)
|
|
|
{
|
|
|
- unsigned long flags;
|
|
|
- int ret;
|
|
|
+ struct clk_core *child;
|
|
|
+ unsigned long new_rate;
|
|
|
+ int ret = NOTIFY_DONE;
|
|
|
|
|
|
- if (!clk)
|
|
|
- return 0;
|
|
|
+ lockdep_assert_held(&prepare_lock);
|
|
|
|
|
|
- flags = clk_enable_lock();
|
|
|
- ret = clk_core_enable(clk->core);
|
|
|
- clk_enable_unlock(flags);
|
|
|
+ new_rate = clk_recalc(core, parent_rate);
|
|
|
+
|
|
|
+ /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
|
|
|
+ if (core->notifier_count)
|
|
|
+ ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
|
|
|
+
|
|
|
+ if (ret & NOTIFY_STOP_MASK) {
|
|
|
+ pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
|
|
|
+ __func__, core->name, ret);
|
|
|
+ goto out;
|
|
|
+ }
|
|
|
+
|
|
|
+ hlist_for_each_entry(child, &core->children, child_node) {
|
|
|
+ ret = __clk_speculate_rates(child, new_rate);
|
|
|
+ if (ret & NOTIFY_STOP_MASK)
|
|
|
+ break;
|
|
|
+ }
|
|
|
|
|
|
+out:
|
|
|
return ret;
|
|
|
}
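As a concrete illustration of the veto path above, here is a minimal consumer-side sketch of a rate-change notifier; foo_clk_notify, foo_clk_nb and the 100 MHz ceiling are hypothetical, but clk_notifier_register() and the NOTIFY_* return codes are the real interfaces __clk_speculate_rates() reacts to:

	static int foo_clk_notify(struct notifier_block *nb, unsigned long event,
				  void *data)
	{
		struct clk_notifier_data *ndata = data;

		/* NOTIFY_BAD carries NOTIFY_STOP_MASK, so speculation aborts */
		if (event == PRE_RATE_CHANGE && ndata->new_rate > 100000000UL)
			return NOTIFY_BAD;

		return NOTIFY_OK;
	}

	static struct notifier_block foo_clk_nb = {
		.notifier_call = foo_clk_notify,
	};

	/* in the consumer's probe path: clk_notifier_register(clk, &foo_clk_nb); */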
|
|
|
-EXPORT_SYMBOL_GPL(clk_enable);
|
|
|
|
|
|
-static unsigned long clk_core_round_rate_nolock(struct clk_core *core,
|
|
|
- unsigned long rate,
|
|
|
- unsigned long min_rate,
|
|
|
- unsigned long max_rate)
|
|
|
+static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
|
|
|
+ struct clk_core *new_parent, u8 p_index)
|
|
|
{
|
|
|
- unsigned long parent_rate = 0;
|
|
|
- struct clk_core *parent;
|
|
|
- struct clk_hw *parent_hw;
|
|
|
+ struct clk_core *child;
|
|
|
|
|
|
- lockdep_assert_held(&prepare_lock);
|
|
|
+ core->new_rate = new_rate;
|
|
|
+ core->new_parent = new_parent;
|
|
|
+ core->new_parent_index = p_index;
|
|
|
+ /* include clk in new parent's PRE_RATE_CHANGE notifications */
|
|
|
+ core->new_child = NULL;
|
|
|
+ if (new_parent && new_parent != core->parent)
|
|
|
+ new_parent->new_child = core;
|
|
|
|
|
|
- if (!core)
|
|
|
- return 0;
|
|
|
+ hlist_for_each_entry(child, &core->children, child_node) {
|
|
|
+ child->new_rate = clk_recalc(child, new_rate);
|
|
|
+ clk_calc_subtree(child, child->new_rate, NULL, 0);
|
|
|
+ }
|
|
|
+}
|
|
|
|
|
|
- parent = core->parent;
|
|
|
+/*
|
|
|
+ * calculate the new rates returning the topmost clock that has to be
|
|
|
+ * changed.
|
|
|
+ */
|
|
|
+static struct clk_core *clk_calc_new_rates(struct clk_core *core,
|
|
|
+ unsigned long rate)
|
|
|
+{
|
|
|
+ struct clk_core *top = core;
|
|
|
+ struct clk_core *old_parent, *parent;
|
|
|
+ struct clk_hw *parent_hw;
|
|
|
+ unsigned long best_parent_rate = 0;
|
|
|
+ unsigned long new_rate;
|
|
|
+ unsigned long min_rate;
|
|
|
+ unsigned long max_rate;
|
|
|
+ int p_index = 0;
|
|
|
+ long ret;
|
|
|
+
|
|
|
+ /* sanity */
|
|
|
+ if (IS_ERR_OR_NULL(core))
|
|
|
+ return NULL;
|
|
|
+
|
|
|
+ /* save parent rate, if it exists */
|
|
|
+ parent = old_parent = core->parent;
|
|
|
if (parent)
|
|
|
- parent_rate = parent->rate;
|
|
|
+ best_parent_rate = parent->rate;
|
|
|
|
|
|
+ clk_core_get_boundaries(core, &min_rate, &max_rate);
|
|
|
+
|
|
|
+ /* find the closest rate and parent clk/rate */
|
|
|
if (core->ops->determine_rate) {
|
|
|
parent_hw = parent ? parent->hw : NULL;
|
|
|
- return core->ops->determine_rate(core->hw, rate,
|
|
|
- min_rate, max_rate,
|
|
|
- &parent_rate, &parent_hw);
|
|
|
- } else if (core->ops->round_rate)
|
|
|
- return core->ops->round_rate(core->hw, rate, &parent_rate);
|
|
|
- else if (core->flags & CLK_SET_RATE_PARENT)
|
|
|
- return clk_core_round_rate_nolock(core->parent, rate, min_rate,
|
|
|
- max_rate);
|
|
|
- else
|
|
|
- return core->rate;
|
|
|
-}
|
|
|
-
|
|
|
-/**
|
|
|
- * __clk_determine_rate - get the closest rate actually supported by a clock
|
|
|
- * @hw: determine the rate of this clock
|
|
|
- * @rate: target rate
|
|
|
- * @min_rate: returned rate must be greater than this rate
|
|
|
- * @max_rate: returned rate must be less than this rate
|
|
|
- *
|
|
|
- * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate and
|
|
|
- * .determine_rate.
|
|
|
- */
|
|
|
-unsigned long __clk_determine_rate(struct clk_hw *hw,
|
|
|
- unsigned long rate,
|
|
|
- unsigned long min_rate,
|
|
|
- unsigned long max_rate)
|
|
|
-{
|
|
|
- if (!hw)
|
|
|
- return 0;
|
|
|
-
|
|
|
- return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(__clk_determine_rate);
|
|
|
-
|
|
|
-/**
|
|
|
- * __clk_round_rate - round the given rate for a clk
|
|
|
- * @clk: round the rate of this clock
|
|
|
- * @rate: the rate which is to be rounded
|
|
|
- *
|
|
|
- * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
|
|
|
- */
|
|
|
-unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
|
|
|
-{
|
|
|
- unsigned long min_rate;
|
|
|
- unsigned long max_rate;
|
|
|
+ ret = core->ops->determine_rate(core->hw, rate,
|
|
|
+ min_rate,
|
|
|
+ max_rate,
|
|
|
+ &best_parent_rate,
|
|
|
+ &parent_hw);
|
|
|
+ if (ret < 0)
|
|
|
+ return NULL;
|
|
|
|
|
|
- if (!clk)
|
|
|
- return 0;
|
|
|
+ new_rate = ret;
|
|
|
+ parent = parent_hw ? parent_hw->core : NULL;
|
|
|
+ } else if (core->ops->round_rate) {
|
|
|
+ ret = core->ops->round_rate(core->hw, rate,
|
|
|
+ &best_parent_rate);
|
|
|
+ if (ret < 0)
|
|
|
+ return NULL;
|
|
|
|
|
|
- clk_core_get_boundaries(clk->core, &min_rate, &max_rate);
|
|
|
+ new_rate = ret;
|
|
|
+ if (new_rate < min_rate || new_rate > max_rate)
|
|
|
+ return NULL;
|
|
|
+ } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
|
|
|
+ /* pass-through clock without adjustable parent */
|
|
|
+ core->new_rate = core->rate;
|
|
|
+ return NULL;
|
|
|
+ } else {
|
|
|
+ /* pass-through clock with adjustable parent */
|
|
|
+ top = clk_calc_new_rates(parent, rate);
|
|
|
+ new_rate = parent->new_rate;
|
|
|
+ goto out;
|
|
|
+ }
|
|
|
|
|
|
- return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(__clk_round_rate);
|
|
|
+ /* some clocks must be gated to change parent */
|
|
|
+ if (parent != old_parent &&
|
|
|
+ (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
|
|
|
+ pr_debug("%s: %s not gated but wants to reparent\n",
|
|
|
+ __func__, core->name);
|
|
|
+ return NULL;
|
|
|
+ }
|
|
|
|
|
|
-/**
|
|
|
- * clk_round_rate - round the given rate for a clk
|
|
|
- * @clk: the clk for which we are rounding a rate
|
|
|
- * @rate: the rate which is to be rounded
|
|
|
- *
|
|
|
- * Takes in a rate as input and rounds it to a rate that the clk can actually
|
|
|
- * use which is then returned. If clk doesn't support round_rate operation
|
|
|
- * then the parent rate is returned.
|
|
|
- */
|
|
|
-long clk_round_rate(struct clk *clk, unsigned long rate)
|
|
|
-{
|
|
|
- unsigned long ret;
|
|
|
+ /* try finding the new parent index */
|
|
|
+ if (parent && core->num_parents > 1) {
|
|
|
+ p_index = clk_fetch_parent_index(core, parent);
|
|
|
+ if (p_index < 0) {
|
|
|
+ pr_debug("%s: clk %s can not be parent of clk %s\n",
|
|
|
+ __func__, parent->name, core->name);
|
|
|
+ return NULL;
|
|
|
+ }
|
|
|
+ }
|
|
|
|
|
|
- if (!clk)
|
|
|
- return 0;
|
|
|
+ if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
|
|
|
+ best_parent_rate != parent->rate)
|
|
|
+ top = clk_calc_new_rates(parent, best_parent_rate);
|
|
|
|
|
|
- clk_prepare_lock();
|
|
|
- ret = __clk_round_rate(clk, rate);
|
|
|
- clk_prepare_unlock();
|
|
|
+out:
|
|
|
+ clk_calc_subtree(core, new_rate, parent, p_index);
|
|
|
|
|
|
- return ret;
|
|
|
+ return top;
|
|
|
}
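For reference, a minimal sketch of the .round_rate side of this contract, assuming a hypothetical power-of-two divider (foo_div_round_rate is made up): leaving *parent_rate untouched stops upward propagation, and a negative return would make clk_calc_new_rates() bail out with NULL.

	static long foo_div_round_rate(struct clk_hw *hw, unsigned long rate,
				       unsigned long *parent_rate)
	{
		int i;

		/* pick the smallest power-of-two divider that satisfies the request */
		for (i = 0; i < 7; i++)
			if ((*parent_rate >> i) <= rate)
				break;

		return *parent_rate >> i;
	}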
|
|
|
-EXPORT_SYMBOL_GPL(clk_round_rate);
|
|
|
|
|
|
-/**
|
|
|
- * __clk_notify - call clk notifier chain
|
|
|
- * @core: clk that is changing rate
|
|
|
- * @msg: clk notifier type (see include/linux/clk.h)
|
|
|
- * @old_rate: old clk rate
|
|
|
- * @new_rate: new clk rate
|
|
|
- *
|
|
|
- * Triggers a notifier call chain on the clk rate-change notification
|
|
|
- * for 'clk'. Passes a pointer to the struct clk and the previous
|
|
|
- * and current rates to the notifier callback. Intended to be called by
|
|
|
- * internal clock code only. Returns NOTIFY_DONE from the last driver
|
|
|
- * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
|
|
|
- * a driver returns that.
|
|
|
+/*
|
|
|
+ * Notify about rate changes in a subtree. Always walk down the whole tree
|
|
|
+ * so that in case of an error we can walk down the whole tree again and
|
|
|
+ * abort the change.
|
|
|
*/
|
|
|
-static int __clk_notify(struct clk_core *core, unsigned long msg,
|
|
|
- unsigned long old_rate, unsigned long new_rate)
|
|
|
+static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
|
|
|
+ unsigned long event)
|
|
|
{
|
|
|
- struct clk_notifier *cn;
|
|
|
- struct clk_notifier_data cnd;
|
|
|
+ struct clk_core *child, *tmp_clk, *fail_clk = NULL;
|
|
|
int ret = NOTIFY_DONE;
|
|
|
|
|
|
- cnd.old_rate = old_rate;
|
|
|
- cnd.new_rate = new_rate;
|
|
|
+ if (core->rate == core->new_rate)
|
|
|
+ return NULL;
|
|
|
|
|
|
- list_for_each_entry(cn, &clk_notifier_list, node) {
|
|
|
- if (cn->clk->core == core) {
|
|
|
- cnd.clk = cn->clk;
|
|
|
- ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
|
|
|
- &cnd);
|
|
|
- }
|
|
|
+ if (core->notifier_count) {
|
|
|
+ ret = __clk_notify(core, event, core->rate, core->new_rate);
|
|
|
+ if (ret & NOTIFY_STOP_MASK)
|
|
|
+ fail_clk = core;
|
|
|
}
|
|
|
|
|
|
- return ret;
|
|
|
-}
|
|
|
-
|
|
|
-/**
|
|
|
- * __clk_recalc_accuracies
|
|
|
- * @core: first clk in the subtree
|
|
|
- *
|
|
|
- * Walks the subtree of clks starting with clk and recalculates accuracies as
|
|
|
- * it goes. Note that if a clk does not implement the .recalc_accuracy
|
|
|
- * callback then it is assumed that the clock will take on the accuracy of it's
|
|
|
- * parent.
|
|
|
- *
|
|
|
- * Caller must hold prepare_lock.
|
|
|
- */
|
|
|
-static void __clk_recalc_accuracies(struct clk_core *core)
|
|
|
-{
|
|
|
- unsigned long parent_accuracy = 0;
|
|
|
- struct clk_core *child;
|
|
|
-
|
|
|
- lockdep_assert_held(&prepare_lock);
|
|
|
-
|
|
|
- if (core->parent)
|
|
|
- parent_accuracy = core->parent->accuracy;
|
|
|
+ hlist_for_each_entry(child, &core->children, child_node) {
|
|
|
+ /* Skip children who will be reparented to another clock */
|
|
|
+ if (child->new_parent && child->new_parent != core)
|
|
|
+ continue;
|
|
|
+ tmp_clk = clk_propagate_rate_change(child, event);
|
|
|
+ if (tmp_clk)
|
|
|
+ fail_clk = tmp_clk;
|
|
|
+ }
|
|
|
|
|
|
- if (core->ops->recalc_accuracy)
|
|
|
- core->accuracy = core->ops->recalc_accuracy(core->hw,
|
|
|
- parent_accuracy);
|
|
|
- else
|
|
|
- core->accuracy = parent_accuracy;
|
|
|
+ /* handle the new child who might not be in core->children yet */
|
|
|
+ if (core->new_child) {
|
|
|
+ tmp_clk = clk_propagate_rate_change(core->new_child, event);
|
|
|
+ if (tmp_clk)
|
|
|
+ fail_clk = tmp_clk;
|
|
|
+ }
|
|
|
|
|
|
- hlist_for_each_entry(child, &core->children, child_node)
|
|
|
- __clk_recalc_accuracies(child);
|
|
|
+ return fail_clk;
|
|
|
}
|
|
|
|
|
|
-static long clk_core_get_accuracy(struct clk_core *core)
|
|
|
+/*
|
|
|
+ * walk down a subtree and set the new rates notifying the rate
|
|
|
+ * change on the way
|
|
|
+ */
|
|
|
+static void clk_change_rate(struct clk_core *core)
|
|
|
{
|
|
|
- unsigned long accuracy;
|
|
|
-
|
|
|
- clk_prepare_lock();
|
|
|
- if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
|
|
|
- __clk_recalc_accuracies(core);
|
|
|
+ struct clk_core *child;
|
|
|
+ struct hlist_node *tmp;
|
|
|
+ unsigned long old_rate;
|
|
|
+ unsigned long best_parent_rate = 0;
|
|
|
+ bool skip_set_rate = false;
|
|
|
+ struct clk_core *old_parent;
|
|
|
|
|
|
- accuracy = __clk_get_accuracy(core);
|
|
|
- clk_prepare_unlock();
|
|
|
+ old_rate = core->rate;
|
|
|
|
|
|
- return accuracy;
|
|
|
-}
|
|
|
+ if (core->new_parent)
|
|
|
+ best_parent_rate = core->new_parent->rate;
|
|
|
+ else if (core->parent)
|
|
|
+ best_parent_rate = core->parent->rate;
|
|
|
|
|
|
-/**
|
|
|
- * clk_get_accuracy - return the accuracy of clk
|
|
|
- * @clk: the clk whose accuracy is being returned
|
|
|
- *
|
|
|
- * Simply returns the cached accuracy of the clk, unless
|
|
|
- * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
|
|
|
- * issued.
|
|
|
- * If clk is NULL then returns 0.
|
|
|
- */
|
|
|
-long clk_get_accuracy(struct clk *clk)
|
|
|
-{
|
|
|
- if (!clk)
|
|
|
- return 0;
|
|
|
+ if (core->new_parent && core->new_parent != core->parent) {
|
|
|
+ old_parent = __clk_set_parent_before(core, core->new_parent);
|
|
|
+ trace_clk_set_parent(core, core->new_parent);
|
|
|
|
|
|
- return clk_core_get_accuracy(clk->core);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(clk_get_accuracy);
|
|
|
+ if (core->ops->set_rate_and_parent) {
|
|
|
+ skip_set_rate = true;
|
|
|
+ core->ops->set_rate_and_parent(core->hw, core->new_rate,
|
|
|
+ best_parent_rate,
|
|
|
+ core->new_parent_index);
|
|
|
+ } else if (core->ops->set_parent) {
|
|
|
+ core->ops->set_parent(core->hw, core->new_parent_index);
|
|
|
+ }
|
|
|
|
|
|
-static unsigned long clk_recalc(struct clk_core *core,
|
|
|
- unsigned long parent_rate)
|
|
|
-{
|
|
|
- if (core->ops->recalc_rate)
|
|
|
- return core->ops->recalc_rate(core->hw, parent_rate);
|
|
|
- return parent_rate;
|
|
|
-}
|
|
|
+ trace_clk_set_parent_complete(core, core->new_parent);
|
|
|
+ __clk_set_parent_after(core, core->new_parent, old_parent);
|
|
|
+ }
|
|
|
|
|
|
-/**
|
|
|
- * __clk_recalc_rates
|
|
|
- * @core: first clk in the subtree
|
|
|
- * @msg: notification type (see include/linux/clk.h)
|
|
|
- *
|
|
|
- * Walks the subtree of clks starting with clk and recalculates rates as it
|
|
|
- * goes. Note that if a clk does not implement the .recalc_rate callback then
|
|
|
- * it is assumed that the clock will take on the rate of its parent.
|
|
|
- *
|
|
|
- * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
|
|
|
- * if necessary.
|
|
|
- *
|
|
|
- * Caller must hold prepare_lock.
|
|
|
- */
|
|
|
-static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
|
|
|
-{
|
|
|
- unsigned long old_rate;
|
|
|
- unsigned long parent_rate = 0;
|
|
|
- struct clk_core *child;
|
|
|
+ trace_clk_set_rate(core, core->new_rate);
|
|
|
|
|
|
- lockdep_assert_held(&prepare_lock);
|
|
|
+ if (!skip_set_rate && core->ops->set_rate)
|
|
|
+ core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
|
|
|
|
|
|
- old_rate = core->rate;
|
|
|
+ trace_clk_set_rate_complete(core, core->new_rate);
|
|
|
|
|
|
- if (core->parent)
|
|
|
- parent_rate = core->parent->rate;
|
|
|
+ core->rate = clk_recalc(core, best_parent_rate);
|
|
|
|
|
|
- core->rate = clk_recalc(core, parent_rate);
|
|
|
+ if (core->notifier_count && old_rate != core->rate)
|
|
|
+ __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
|
|
|
|
|
|
/*
|
|
|
- * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
|
|
|
- * & ABORT_RATE_CHANGE notifiers
|
|
|
+ * Use safe iteration, as change_rate can actually swap parents
|
|
|
+ * for certain clock types.
|
|
|
*/
|
|
|
- if (core->notifier_count && msg)
|
|
|
- __clk_notify(core, msg, old_rate, core->rate);
|
|
|
+ hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
|
|
|
+ /* Skip children who will be reparented to another clock */
|
|
|
+ if (child->new_parent && child->new_parent != core)
|
|
|
+ continue;
|
|
|
+ clk_change_rate(child);
|
|
|
+ }
|
|
|
|
|
|
- hlist_for_each_entry(child, &core->children, child_node)
|
|
|
- __clk_recalc_rates(child, msg);
|
|
|
+ /* handle the new child who might not be in core->children yet */
|
|
|
+ if (core->new_child)
|
|
|
+ clk_change_rate(core->new_child);
|
|
|
}
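A companion .set_rate sketch for the same hypothetical divider (struct foo_div and its register layout are illustrative). By the time clk_change_rate() invokes this callback the parent rate is already settled, so the op only has to program its own hardware:

	struct foo_div {
		struct clk_hw hw;
		void __iomem *reg;
	};

	static int foo_div_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
	{
		struct foo_div *div = container_of(hw, struct foo_div, hw);
		u32 shift = 0;

		while ((parent_rate >> shift) > rate && shift < 7)
			shift++;

		writel(shift, div->reg);	/* hypothetical divider register */

		return 0;
	}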
|
|
|
|
|
|
-static unsigned long clk_core_get_rate(struct clk_core *core)
|
|
|
+static int clk_core_set_rate_nolock(struct clk_core *core,
|
|
|
+ unsigned long req_rate)
|
|
|
{
|
|
|
- unsigned long rate;
|
|
|
+ struct clk_core *top, *fail_clk;
|
|
|
+ unsigned long rate = req_rate;
|
|
|
+ int ret = 0;
|
|
|
|
|
|
- clk_prepare_lock();
|
|
|
+ if (!core)
|
|
|
+ return 0;
|
|
|
|
|
|
- if (core && (core->flags & CLK_GET_RATE_NOCACHE))
|
|
|
- __clk_recalc_rates(core, 0);
|
|
|
+ /* bail early if nothing to do */
|
|
|
+ if (rate == clk_core_get_rate_nolock(core))
|
|
|
+ return 0;
|
|
|
|
|
|
- rate = clk_core_get_rate_nolock(core);
|
|
|
- clk_prepare_unlock();
|
|
|
+ if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
|
|
|
+ return -EBUSY;
|
|
|
|
|
|
- return rate;
|
|
|
+ /* calculate new rates and get the topmost changed clock */
|
|
|
+ top = clk_calc_new_rates(core, rate);
|
|
|
+ if (!top)
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ /* notify that we are about to change rates */
|
|
|
+ fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
|
|
|
+ if (fail_clk) {
|
|
|
+ pr_debug("%s: failed to set %s rate\n", __func__,
|
|
|
+ fail_clk->name);
|
|
|
+ clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
|
|
|
+ return -EBUSY;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* change the rates */
|
|
|
+ clk_change_rate(top);
|
|
|
+
|
|
|
+ core->req_rate = req_rate;
|
|
|
+
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * clk_get_rate - return the rate of clk
|
|
|
- * @clk: the clk whose rate is being returned
|
|
|
+ * clk_set_rate - specify a new rate for clk
|
|
|
+ * @clk: the clk whose rate is being changed
|
|
|
+ * @rate: the new rate for clk
|
|
|
*
|
|
|
- * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
|
|
|
- * is set, which means a recalc_rate will be issued.
|
|
|
- * If clk is NULL then returns 0.
|
|
|
+ * In the simplest case clk_set_rate will only adjust the rate of clk.
|
|
|
+ *
|
|
|
+ * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
|
|
|
+ * propagate up to clk's parent; whether or not this happens depends on the
|
|
|
+ * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
|
|
|
+ * after calling .round_rate then upstream parent propagation is ignored. If
|
|
|
+ * *parent_rate comes back with a new rate for clk's parent then we propagate
|
|
|
+ * up to clk's parent and set its rate. Upward propagation will continue
|
|
|
+ * until either a clk does not support the CLK_SET_RATE_PARENT flag or
|
|
|
+ * .round_rate stops requesting changes to clk's parent_rate.
|
|
|
+ *
|
|
|
+ * Rate changes are accomplished via tree traversal that also recalculates the
|
|
|
+ * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
|
|
|
+ *
|
|
|
+ * Returns 0 on success, -EERROR otherwise.
|
|
|
*/
|
|
|
-unsigned long clk_get_rate(struct clk *clk)
|
|
|
+int clk_set_rate(struct clk *clk, unsigned long rate)
|
|
|
{
|
|
|
+ int ret;
|
|
|
+
|
|
|
if (!clk)
|
|
|
return 0;
|
|
|
|
|
|
- return clk_core_get_rate(clk->core);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(clk_get_rate);
|
|
|
-
|
|
|
-static int clk_fetch_parent_index(struct clk_core *core,
|
|
|
- struct clk_core *parent)
|
|
|
-{
|
|
|
- int i;
|
|
|
-
|
|
|
- if (!core->parents) {
|
|
|
- core->parents = kcalloc(core->num_parents,
|
|
|
- sizeof(struct clk *), GFP_KERNEL);
|
|
|
- if (!core->parents)
|
|
|
- return -ENOMEM;
|
|
|
- }
|
|
|
-
|
|
|
- /*
|
|
|
- * find index of new parent clock using cached parent ptrs,
|
|
|
- * or if not yet cached, use string name comparison and cache
|
|
|
- * them now to avoid future calls to clk_core_lookup.
|
|
|
- */
|
|
|
- for (i = 0; i < core->num_parents; i++) {
|
|
|
- if (core->parents[i] == parent)
|
|
|
- return i;
|
|
|
+ /* prevent racing with updates to the clock topology */
|
|
|
+ clk_prepare_lock();
|
|
|
|
|
|
- if (core->parents[i])
|
|
|
- continue;
|
|
|
+ ret = clk_core_set_rate_nolock(clk->core, rate);
|
|
|
|
|
|
- if (!strcmp(core->parent_names[i], parent->name)) {
|
|
|
- core->parents[i] = clk_core_lookup(parent->name);
|
|
|
- return i;
|
|
|
- }
|
|
|
- }
|
|
|
+ clk_prepare_unlock();
|
|
|
|
|
|
- return -EINVAL;
|
|
|
+ return ret;
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(clk_set_rate);
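Typical consumer-side usage, as a sketch: round first so the driver knows what the tree can actually deliver, then commit. The "iface" connection id and the 48 MHz target are illustrative.

	static int foo_setup_iface_clk(struct device *dev)
	{
		struct clk *iface = devm_clk_get(dev, "iface");
		long rounded;

		if (IS_ERR(iface))
			return PTR_ERR(iface);

		rounded = clk_round_rate(iface, 48000000);
		if (rounded <= 0)
			return -EINVAL;

		return clk_set_rate(iface, rounded);
	}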
|
|
|
|
|
|
-static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
|
|
|
+/**
|
|
|
+ * clk_set_rate_range - set a rate range for a clock source
|
|
|
+ * @clk: clock source
|
|
|
+ * @min: desired minimum clock rate in Hz, inclusive
|
|
|
+ * @max: desired maximum clock rate in Hz, inclusive
|
|
|
+ *
|
|
|
+ * Returns success (0) or negative errno.
|
|
|
+ */
|
|
|
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
|
|
|
{
|
|
|
- hlist_del(&core->child_node);
|
|
|
+ int ret = 0;
|
|
|
|
|
|
- if (new_parent) {
|
|
|
- /* avoid duplicate POST_RATE_CHANGE notifications */
|
|
|
- if (new_parent->new_child == core)
|
|
|
- new_parent->new_child = NULL;
|
|
|
+ if (!clk)
|
|
|
+ return 0;
|
|
|
|
|
|
- hlist_add_head(&core->child_node, &new_parent->children);
|
|
|
- } else {
|
|
|
- hlist_add_head(&core->child_node, &clk_orphan_list);
|
|
|
+ if (min > max) {
|
|
|
+ pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
|
|
|
+ __func__, clk->core->name, clk->dev_id, clk->con_id,
|
|
|
+ min, max);
|
|
|
+ return -EINVAL;
|
|
|
}
|
|
|
|
|
|
- core->parent = new_parent;
|
|
|
-}
|
|
|
-
|
|
|
-static struct clk_core *__clk_set_parent_before(struct clk_core *core,
|
|
|
- struct clk_core *parent)
|
|
|
-{
|
|
|
- unsigned long flags;
|
|
|
- struct clk_core *old_parent = core->parent;
|
|
|
+ clk_prepare_lock();
|
|
|
|
|
|
- /*
|
|
|
- * Migrate prepare state between parents and prevent race with
|
|
|
- * clk_enable().
|
|
|
- *
|
|
|
- * If the clock is not prepared, then a race with
|
|
|
- * clk_enable/disable() is impossible since we already have the
|
|
|
- * prepare lock (future calls to clk_enable() need to be preceded by
|
|
|
- * a clk_prepare()).
|
|
|
- *
|
|
|
- * If the clock is prepared, migrate the prepared state to the new
|
|
|
- * parent and also protect against a race with clk_enable() by
|
|
|
- * forcing the clock and the new parent on. This ensures that all
|
|
|
- * future calls to clk_enable() are practically NOPs with respect to
|
|
|
- * hardware and software states.
|
|
|
- *
|
|
|
- * See also: Comment for clk_set_parent() below.
|
|
|
- */
|
|
|
- if (core->prepare_count) {
|
|
|
- clk_core_prepare(parent);
|
|
|
- clk_core_enable(parent);
|
|
|
- clk_core_enable(core);
|
|
|
+ if (min != clk->min_rate || max != clk->max_rate) {
|
|
|
+ clk->min_rate = min;
|
|
|
+ clk->max_rate = max;
|
|
|
+ ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
|
|
|
}
|
|
|
|
|
|
- /* update the clk tree topology */
|
|
|
- flags = clk_enable_lock();
|
|
|
- clk_reparent(core, parent);
|
|
|
- clk_enable_unlock(flags);
|
|
|
+ clk_prepare_unlock();
|
|
|
|
|
|
- return old_parent;
|
|
|
+ return ret;
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(clk_set_rate_range);
|
|
|
|
|
|
-static void __clk_set_parent_after(struct clk_core *core,
|
|
|
- struct clk_core *parent,
|
|
|
- struct clk_core *old_parent)
|
|
|
+/**
|
|
|
+ * clk_set_min_rate - set a minimum clock rate for a clock source
|
|
|
+ * @clk: clock source
|
|
|
+ * @rate: desired minimum clock rate in Hz, inclusive
|
|
|
+ *
|
|
|
+ * Returns success (0) or negative errno.
|
|
|
+ */
|
|
|
+int clk_set_min_rate(struct clk *clk, unsigned long rate)
|
|
|
{
|
|
|
- /*
|
|
|
- * Finish the migration of prepare state and undo the changes done
|
|
|
- * for preventing a race with clk_enable().
|
|
|
- */
|
|
|
- if (core->prepare_count) {
|
|
|
- clk_core_disable(core);
|
|
|
- clk_core_disable(old_parent);
|
|
|
- clk_core_unprepare(old_parent);
|
|
|
- }
|
|
|
+ if (!clk)
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ return clk_set_rate_range(clk, rate, clk->max_rate);
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(clk_set_min_rate);
|
|
|
|
|
|
-static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
|
|
|
- u8 p_index)
|
|
|
+/**
|
|
|
+ * clk_set_max_rate - set a maximum clock rate for a clock source
|
|
|
+ * @clk: clock source
|
|
|
+ * @rate: desired maximum clock rate in Hz, inclusive
|
|
|
+ *
|
|
|
+ * Returns success (0) or negative errno.
|
|
|
+ */
|
|
|
+int clk_set_max_rate(struct clk *clk, unsigned long rate)
|
|
|
{
|
|
|
- unsigned long flags;
|
|
|
- int ret = 0;
|
|
|
- struct clk_core *old_parent;
|
|
|
-
|
|
|
- old_parent = __clk_set_parent_before(core, parent);
|
|
|
-
|
|
|
- trace_clk_set_parent(core, parent);
|
|
|
-
|
|
|
- /* change clock input source */
|
|
|
- if (parent && core->ops->set_parent)
|
|
|
- ret = core->ops->set_parent(core->hw, p_index);
|
|
|
-
|
|
|
- trace_clk_set_parent_complete(core, parent);
|
|
|
-
|
|
|
- if (ret) {
|
|
|
- flags = clk_enable_lock();
|
|
|
- clk_reparent(core, old_parent);
|
|
|
- clk_enable_unlock(flags);
|
|
|
-
|
|
|
- if (core->prepare_count) {
|
|
|
- clk_core_disable(core);
|
|
|
- clk_core_disable(parent);
|
|
|
- clk_core_unprepare(parent);
|
|
|
- }
|
|
|
- return ret;
|
|
|
- }
|
|
|
-
|
|
|
- __clk_set_parent_after(core, parent, old_parent);
|
|
|
+ if (!clk)
|
|
|
+ return 0;
|
|
|
|
|
|
- return 0;
|
|
|
+ return clk_set_rate_range(clk, clk->min_rate, rate);
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(clk_set_max_rate);
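A short sketch of how a consumer might use these range helpers instead of pinning an exact rate; the thermal-cap scenario and the 800 MHz figure are hypothetical:

	static int foo_thermal_cap(struct clk *cpu_clk, bool hot)
	{
		if (hot)
			return clk_set_max_rate(cpu_clk, 800000000);

		/* lift the cap; ULONG_MAX effectively means "no upper bound" */
		return clk_set_max_rate(cpu_clk, ULONG_MAX);
	}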
|
|
|
|
|
|
/**
|
|
|
- * __clk_speculate_rates
|
|
|
- * @core: first clk in the subtree
|
|
|
- * @parent_rate: the "future" rate of clk's parent
|
|
|
- *
|
|
|
- * Walks the subtree of clks starting with clk, speculating rates as it
|
|
|
- * goes and firing off PRE_RATE_CHANGE notifications as necessary.
|
|
|
- *
|
|
|
- * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
|
|
|
- * pre-rate change notifications and returns early if no clks in the
|
|
|
- * subtree have subscribed to the notifications. Note that if a clk does not
|
|
|
- * implement the .recalc_rate callback then it is assumed that the clock will
|
|
|
- * take on the rate of its parent.
|
|
|
+ * clk_get_parent - return the parent of a clk
|
|
|
+ * @clk: the clk whose parent gets returned
|
|
|
*
|
|
|
- * Caller must hold prepare_lock.
|
|
|
+ * Simply returns clk->parent. Returns NULL if clk is NULL.
|
|
|
*/
|
|
|
-static int __clk_speculate_rates(struct clk_core *core,
|
|
|
- unsigned long parent_rate)
|
|
|
+struct clk *clk_get_parent(struct clk *clk)
|
|
|
{
|
|
|
- struct clk_core *child;
|
|
|
- unsigned long new_rate;
|
|
|
- int ret = NOTIFY_DONE;
|
|
|
+ struct clk *parent;
|
|
|
|
|
|
- lockdep_assert_held(&prepare_lock);
|
|
|
+ clk_prepare_lock();
|
|
|
+ parent = __clk_get_parent(clk);
|
|
|
+ clk_prepare_unlock();
|
|
|
|
|
|
- new_rate = clk_recalc(core, parent_rate);
|
|
|
+ return parent;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(clk_get_parent);
|
|
|
|
|
|
- /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
|
|
|
- if (core->notifier_count)
|
|
|
- ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
|
|
|
+/*
|
|
|
+ * .get_parent is mandatory for clocks with multiple possible parents. It is
|
|
|
+ * optional for single-parent clocks. Always call .get_parent if it is
|
|
|
+ * available and WARN if it is missing for multi-parent clocks.
|
|
|
+ *
|
|
|
+ * For single-parent clocks without .get_parent, first check to see if the
|
|
|
+ * .parents array exists, and if so use it to avoid an expensive tree
|
|
|
+ * traversal. If .parents does not exist then walk the tree.
|
|
|
+ */
|
|
|
+static struct clk_core *__clk_init_parent(struct clk_core *core)
|
|
|
+{
|
|
|
+ struct clk_core *ret = NULL;
|
|
|
+ u8 index;
|
|
|
|
|
|
- if (ret & NOTIFY_STOP_MASK) {
|
|
|
- pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
|
|
|
- __func__, core->name, ret);
|
|
|
+ /* handle the trivial cases */
|
|
|
+
|
|
|
+ if (!core->num_parents)
|
|
|
goto out;
|
|
|
- }
|
|
|
|
|
|
- hlist_for_each_entry(child, &core->children, child_node) {
|
|
|
- ret = __clk_speculate_rates(child, new_rate);
|
|
|
- if (ret & NOTIFY_STOP_MASK)
|
|
|
- break;
|
|
|
+ if (core->num_parents == 1) {
|
|
|
+ if (IS_ERR_OR_NULL(core->parent))
|
|
|
+ core->parent = clk_core_lookup(core->parent_names[0]);
|
|
|
+ ret = core->parent;
|
|
|
+ goto out;
|
|
|
}
|
|
|
|
|
|
+ if (!core->ops->get_parent) {
|
|
|
+ WARN(!core->ops->get_parent,
|
|
|
+ "%s: multi-parent clocks must implement .get_parent\n",
|
|
|
+ __func__);
|
|
|
+ goto out;
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Do our best to cache parent clocks in core->parents. This prevents
|
|
|
+ * unnecessary and expensive lookups. We don't set core->parent here;
|
|
|
+ * that is done by the calling function.
|
|
|
+ */
|
|
|
+
|
|
|
+ index = core->ops->get_parent(core->hw);
|
|
|
+
|
|
|
+ if (!core->parents)
|
|
|
+ core->parents =
|
|
|
+ kcalloc(core->num_parents, sizeof(struct clk *),
|
|
|
+ GFP_KERNEL);
|
|
|
+
|
|
|
+ ret = clk_core_get_parent_by_index(core, index);
|
|
|
+
|
|
|
out:
|
|
|
return ret;
|
|
|
}
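For illustration, a minimal .get_parent sketch that __clk_init_parent() might end up calling for a hypothetical mux whose low two bits select the input (struct foo_mux is made up):

	struct foo_mux {
		struct clk_hw hw;
		void __iomem *reg;
	};

	static u8 foo_mux_get_parent(struct clk_hw *hw)
	{
		struct foo_mux *mux = container_of(hw, struct foo_mux, hw);

		/* low two bits select one of up to four parents */
		return readl(mux->reg) & 0x3;
	}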
|
|
|
|
|
|
-static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
|
|
|
- struct clk_core *new_parent, u8 p_index)
|
|
|
+static void clk_core_reparent(struct clk_core *core,
|
|
|
+ struct clk_core *new_parent)
|
|
|
{
|
|
|
- struct clk_core *child;
|
|
|
-
|
|
|
- core->new_rate = new_rate;
|
|
|
- core->new_parent = new_parent;
|
|
|
- core->new_parent_index = p_index;
|
|
|
- /* include clk in new parent's PRE_RATE_CHANGE notifications */
|
|
|
- core->new_child = NULL;
|
|
|
- if (new_parent && new_parent != core->parent)
|
|
|
- new_parent->new_child = core;
|
|
|
-
|
|
|
- hlist_for_each_entry(child, &core->children, child_node) {
|
|
|
- child->new_rate = clk_recalc(child, new_rate);
|
|
|
- clk_calc_subtree(child, child->new_rate, NULL, 0);
|
|
|
- }
|
|
|
+ clk_reparent(core, new_parent);
|
|
|
+ __clk_recalc_accuracies(core);
|
|
|
+ __clk_recalc_rates(core, POST_RATE_CHANGE);
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
- * calculate the new rates returning the topmost clock that has to be
|
|
|
- * changed.
|
|
|
+/**
|
|
|
+ * clk_has_parent - check if a clock is a possible parent for another
|
|
|
+ * @clk: clock source
|
|
|
+ * @parent: parent clock source
|
|
|
+ *
|
|
|
+ * This function can be used in drivers that need to check that a clock can be
|
|
|
+ * the parent of another without actually changing the parent.
|
|
|
+ *
|
|
|
+ * Returns true if @parent is a possible parent for @clk, false otherwise.
|
|
|
*/
|
|
|
-static struct clk_core *clk_calc_new_rates(struct clk_core *core,
|
|
|
- unsigned long rate)
|
|
|
+bool clk_has_parent(struct clk *clk, struct clk *parent)
|
|
|
{
|
|
|
- struct clk_core *top = core;
|
|
|
- struct clk_core *old_parent, *parent;
|
|
|
- struct clk_hw *parent_hw;
|
|
|
- unsigned long best_parent_rate = 0;
|
|
|
- unsigned long new_rate;
|
|
|
- unsigned long min_rate;
|
|
|
- unsigned long max_rate;
|
|
|
- int p_index = 0;
|
|
|
- long ret;
|
|
|
+ struct clk_core *core, *parent_core;
|
|
|
+ unsigned int i;
|
|
|
|
|
|
- /* sanity */
|
|
|
- if (IS_ERR_OR_NULL(core))
|
|
|
- return NULL;
|
|
|
+ /* NULL clocks should be nops, so return success if either is NULL. */
|
|
|
+ if (!clk || !parent)
|
|
|
+ return true;
|
|
|
|
|
|
- /* save parent rate, if it exists */
|
|
|
- parent = old_parent = core->parent;
|
|
|
- if (parent)
|
|
|
- best_parent_rate = parent->rate;
|
|
|
+ core = clk->core;
|
|
|
+ parent_core = parent->core;
|
|
|
|
|
|
- clk_core_get_boundaries(core, &min_rate, &max_rate);
|
|
|
+ /* Optimize for the case where the requested parent is already set. */
|
|
|
+ if (core->parent == parent_core)
|
|
|
+ return true;
|
|
|
|
|
|
- /* find the closest rate and parent clk/rate */
|
|
|
- if (core->ops->determine_rate) {
|
|
|
- parent_hw = parent ? parent->hw : NULL;
|
|
|
- ret = core->ops->determine_rate(core->hw, rate,
|
|
|
- min_rate,
|
|
|
- max_rate,
|
|
|
- &best_parent_rate,
|
|
|
- &parent_hw);
|
|
|
- if (ret < 0)
|
|
|
- return NULL;
|
|
|
+ for (i = 0; i < core->num_parents; i++)
|
|
|
+ if (strcmp(core->parent_names[i], parent_core->name) == 0)
|
|
|
+ return true;
|
|
|
|
|
|
- new_rate = ret;
|
|
|
- parent = parent_hw ? parent_hw->core : NULL;
|
|
|
- } else if (core->ops->round_rate) {
|
|
|
- ret = core->ops->round_rate(core->hw, rate,
|
|
|
- &best_parent_rate);
|
|
|
- if (ret < 0)
|
|
|
- return NULL;
|
|
|
+ return false;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(clk_has_parent);
|
|
|
|
|
|
- new_rate = ret;
|
|
|
- if (new_rate < min_rate || new_rate > max_rate)
|
|
|
- return NULL;
|
|
|
- } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
|
|
|
- /* pass-through clock without adjustable parent */
|
|
|
- core->new_rate = core->rate;
|
|
|
- return NULL;
|
|
|
- } else {
|
|
|
- /* pass-through clock with adjustable parent */
|
|
|
- top = clk_calc_new_rates(parent, rate);
|
|
|
- new_rate = parent->new_rate;
|
|
|
+static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
|
|
|
+{
|
|
|
+ int ret = 0;
|
|
|
+ int p_index = 0;
|
|
|
+ unsigned long p_rate = 0;
|
|
|
+
|
|
|
+ if (!core)
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ /* prevent racing with updates to the clock topology */
|
|
|
+ clk_prepare_lock();
|
|
|
+
|
|
|
+ if (core->parent == parent)
|
|
|
+ goto out;
|
|
|
+
|
|
|
+ /* verify ops for multi-parent clks */
|
|
|
+ if ((core->num_parents > 1) && (!core->ops->set_parent)) {
|
|
|
+ ret = -ENOSYS;
|
|
|
goto out;
|
|
|
}
|
|
|
|
|
|
- /* some clocks must be gated to change parent */
|
|
|
- if (parent != old_parent &&
|
|
|
- (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
|
|
|
- pr_debug("%s: %s not gated but wants to reparent\n",
|
|
|
- __func__, core->name);
|
|
|
- return NULL;
|
|
|
+ /* check that we are allowed to re-parent if the clock is in use */
|
|
|
+ if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
|
|
|
+ ret = -EBUSY;
|
|
|
+ goto out;
|
|
|
}
|
|
|
|
|
|
/* try finding the new parent index */
|
|
|
- if (parent && core->num_parents > 1) {
|
|
|
+ if (parent) {
|
|
|
p_index = clk_fetch_parent_index(core, parent);
|
|
|
+ p_rate = parent->rate;
|
|
|
if (p_index < 0) {
|
|
|
pr_debug("%s: clk %s can not be parent of clk %s\n",
|
|
|
- __func__, parent->name, core->name);
|
|
|
- return NULL;
|
|
|
+ __func__, parent->name, core->name);
|
|
|
+ ret = p_index;
|
|
|
+ goto out;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
|
|
|
- best_parent_rate != parent->rate)
|
|
|
- top = clk_calc_new_rates(parent, best_parent_rate);
|
|
|
-
|
|
|
-out:
|
|
|
- clk_calc_subtree(core, new_rate, parent, p_index);
|
|
|
-
|
|
|
- return top;
|
|
|
-}
|
|
|
+ /* propagate PRE_RATE_CHANGE notifications */
|
|
|
+ ret = __clk_speculate_rates(core, p_rate);
|
|
|
|
|
|
-/*
|
|
|
- * Notify about rate changes in a subtree. Always walk down the whole tree
|
|
|
- * so that in case of an error we can walk down the whole tree again and
|
|
|
- * abort the change.
|
|
|
- */
|
|
|
-static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
|
|
|
- unsigned long event)
|
|
|
-{
|
|
|
- struct clk_core *child, *tmp_clk, *fail_clk = NULL;
|
|
|
- int ret = NOTIFY_DONE;
|
|
|
+ /* abort if a driver objects */
|
|
|
+ if (ret & NOTIFY_STOP_MASK)
|
|
|
+ goto out;
|
|
|
|
|
|
- if (core->rate == core->new_rate)
|
|
|
- return NULL;
|
|
|
+ /* do the re-parent */
|
|
|
+ ret = __clk_set_parent(core, parent, p_index);
|
|
|
|
|
|
- if (core->notifier_count) {
|
|
|
- ret = __clk_notify(core, event, core->rate, core->new_rate);
|
|
|
- if (ret & NOTIFY_STOP_MASK)
|
|
|
- fail_clk = core;
|
|
|
+ /* propagate rate and accuracy recalculation accordingly */
|
|
|
+ if (ret) {
|
|
|
+ __clk_recalc_rates(core, ABORT_RATE_CHANGE);
|
|
|
+ } else {
|
|
|
+ __clk_recalc_rates(core, POST_RATE_CHANGE);
|
|
|
+ __clk_recalc_accuracies(core);
|
|
|
}
|
|
|
|
|
|
- hlist_for_each_entry(child, &core->children, child_node) {
|
|
|
- /* Skip children who will be reparented to another clock */
|
|
|
- if (child->new_parent && child->new_parent != core)
|
|
|
- continue;
|
|
|
- tmp_clk = clk_propagate_rate_change(child, event);
|
|
|
- if (tmp_clk)
|
|
|
- fail_clk = tmp_clk;
|
|
|
- }
|
|
|
+out:
|
|
|
+ clk_prepare_unlock();
|
|
|
|
|
|
- /* handle the new child who might not be in core->children yet */
|
|
|
- if (core->new_child) {
|
|
|
- tmp_clk = clk_propagate_rate_change(core->new_child, event);
|
|
|
- if (tmp_clk)
|
|
|
- fail_clk = tmp_clk;
|
|
|
- }
|
|
|
+ return ret;
|
|
|
+}
|
|
|
|
|
|
- return fail_clk;
|
|
|
+/**
|
|
|
+ * clk_set_parent - switch the parent of a mux clk
|
|
|
+ * @clk: the mux clk whose input we are switching
|
|
|
+ * @parent: the new input to clk
|
|
|
+ *
|
|
|
+ * Re-parent clk to use parent as its new input source. If clk is in
|
|
|
+ * prepared state, the clk will get enabled for the duration of this call. If
|
|
|
+ * that's not acceptable for a specific clk (e.g. the consumer can't handle
|
|
|
+ * that, the reparenting is glitchy in hardware, etc), use the
|
|
|
+ * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
|
|
|
+ *
|
|
|
+ * After successfully changing clk's parent, clk_set_parent will update the
|
|
|
+ * clk topology, sysfs topology and propagate rate recalculation via
|
|
|
+ * __clk_recalc_rates.
|
|
|
+ *
|
|
|
+ * Returns 0 on success, -EERROR otherwise.
|
|
|
+ */
|
|
|
+int clk_set_parent(struct clk *clk, struct clk *parent)
|
|
|
+{
|
|
|
+ if (!clk)
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(clk_set_parent);
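Consumer-side sketch combining clk_has_parent() and clk_set_parent(); the mux/pll2 naming is illustrative:

	static int foo_select_pll(struct clk *mux, struct clk *pll2)
	{
		/* refuse early if pll2 is not wired to this mux at all */
		if (!clk_has_parent(mux, pll2))
			return -EINVAL;

		return clk_set_parent(mux, pll2);
	}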
|
|
|
|
|
|
-/*
|
|
|
- * walk down a subtree and set the new rates notifying the rate
|
|
|
- * change on the way
|
|
|
+/**
|
|
|
+ * clk_set_phase - adjust the phase shift of a clock signal
|
|
|
+ * @clk: clock signal source
|
|
|
+ * @degrees: number of degrees the signal is shifted
|
|
|
+ *
|
|
|
+ * Shifts the phase of a clock signal by the specified
|
|
|
+ * degrees. Returns 0 on success, -EERROR otherwise.
|
|
|
+ *
|
|
|
+ * This function makes no distinction about the input or reference
|
|
|
+ * signal that we adjust the clock signal phase against. For example,
|
|
|
+ * for phase-locked-loop clock signal generators we may shift phase with
|
|
|
+ * respect to the feedback clock signal input, but for other cases the
|
|
|
+ * clock phase may be shifted with respect to some other, unspecified
|
|
|
+ * signal.
|
|
|
+ *
|
|
|
+ * Additionally the concept of phase shift does not propagate through
|
|
|
+ * the clock tree hierarchy, which sets it apart from clock rates and
|
|
|
+ * clock accuracy. A parent clock phase attribute does not have an
|
|
|
+ * impact on the phase attribute of a child clock.
|
|
|
*/
|
|
|
-static void clk_change_rate(struct clk_core *core)
|
|
|
+int clk_set_phase(struct clk *clk, int degrees)
|
|
|
{
|
|
|
- struct clk_core *child;
|
|
|
- struct hlist_node *tmp;
|
|
|
- unsigned long old_rate;
|
|
|
- unsigned long best_parent_rate = 0;
|
|
|
- bool skip_set_rate = false;
|
|
|
- struct clk_core *old_parent;
|
|
|
+ int ret = -EINVAL;
|
|
|
|
|
|
- old_rate = core->rate;
|
|
|
+ if (!clk)
|
|
|
+ return 0;
|
|
|
|
|
|
- if (core->new_parent)
|
|
|
- best_parent_rate = core->new_parent->rate;
|
|
|
- else if (core->parent)
|
|
|
- best_parent_rate = core->parent->rate;
|
|
|
+ /* sanity check degrees */
|
|
|
+ degrees %= 360;
|
|
|
+ if (degrees < 0)
|
|
|
+ degrees += 360;
|
|
|
|
|
|
- if (core->new_parent && core->new_parent != core->parent) {
|
|
|
- old_parent = __clk_set_parent_before(core, core->new_parent);
|
|
|
- trace_clk_set_parent(core, core->new_parent);
|
|
|
+ clk_prepare_lock();
|
|
|
|
|
|
- if (core->ops->set_rate_and_parent) {
|
|
|
- skip_set_rate = true;
|
|
|
- core->ops->set_rate_and_parent(core->hw, core->new_rate,
|
|
|
- best_parent_rate,
|
|
|
- core->new_parent_index);
|
|
|
- } else if (core->ops->set_parent) {
|
|
|
- core->ops->set_parent(core->hw, core->new_parent_index);
|
|
|
- }
|
|
|
+ trace_clk_set_phase(clk->core, degrees);
|
|
|
|
|
|
- trace_clk_set_parent_complete(core, core->new_parent);
|
|
|
- __clk_set_parent_after(core, core->new_parent, old_parent);
|
|
|
- }
|
|
|
+ if (clk->core->ops->set_phase)
|
|
|
+ ret = clk->core->ops->set_phase(clk->core->hw, degrees);
|
|
|
|
|
|
- trace_clk_set_rate(core, core->new_rate);
|
|
|
+ trace_clk_set_phase_complete(clk->core, degrees);
|
|
|
|
|
|
- if (!skip_set_rate && core->ops->set_rate)
|
|
|
- core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
|
|
|
+ if (!ret)
|
|
|
+ clk->core->phase = degrees;
|
|
|
|
|
|
- trace_clk_set_rate_complete(core, core->new_rate);
|
|
|
+ clk_prepare_unlock();
|
|
|
|
|
|
- core->rate = clk_recalc(core, best_parent_rate);
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(clk_set_phase);
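A consumer sketch, assuming a capture interface that samples on a 90-degree-shifted clock (the scenario is illustrative; without a .set_phase op the call returns -EINVAL):

	static int foo_tune_sample_clk(struct clk *sample_clk)
	{
		int ret = clk_set_phase(sample_clk, 90);

		/* clk_get_phase() below returns the value cached by the core */
		if (!ret && clk_get_phase(sample_clk) != 90)
			ret = -EIO;

		return ret;
	}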
|
|
|
|
|
|
- if (core->notifier_count && old_rate != core->rate)
|
|
|
- __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
|
|
|
+static int clk_core_get_phase(struct clk_core *core)
|
|
|
+{
|
|
|
+ int ret;
|
|
|
|
|
|
- /*
|
|
|
- * Use safe iteration, as change_rate can actually swap parents
|
|
|
- * for certain clock types.
|
|
|
- */
|
|
|
- hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
|
|
|
- /* Skip children who will be reparented to another clock */
|
|
|
- if (child->new_parent && child->new_parent != core)
|
|
|
- continue;
|
|
|
- clk_change_rate(child);
|
|
|
- }
|
|
|
+ clk_prepare_lock();
|
|
|
+ ret = core->phase;
|
|
|
+ clk_prepare_unlock();
|
|
|
|
|
|
- /* handle the new child who might not be in core->children yet */
|
|
|
- if (core->new_child)
|
|
|
- clk_change_rate(core->new_child);
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
-static int clk_core_set_rate_nolock(struct clk_core *core,
|
|
|
- unsigned long req_rate)
|
|
|
+/**
|
|
|
+ * clk_get_phase - return the phase shift of a clock signal
|
|
|
+ * @clk: clock signal source
|
|
|
+ *
|
|
|
+ * Returns the phase shift of a clock node in degrees, otherwise returns
|
|
|
+ * -EERROR.
|
|
|
+ */
|
|
|
+int clk_get_phase(struct clk *clk)
|
|
|
{
|
|
|
- struct clk_core *top, *fail_clk;
|
|
|
- unsigned long rate = req_rate;
|
|
|
- int ret = 0;
|
|
|
-
|
|
|
- if (!core)
|
|
|
+ if (!clk)
|
|
|
return 0;
|
|
|
|
|
|
- /* bail early if nothing to do */
|
|
|
- if (rate == clk_core_get_rate_nolock(core))
|
|
|
- return 0;
|
|
|
+ return clk_core_get_phase(clk->core);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(clk_get_phase);
|
|
|
|
|
|
- if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
|
|
|
- return -EBUSY;
|
|
|
+/**
|
|
|
+ * clk_is_match - check if two clk's point to the same hardware clock
|
|
|
+ * @p: clk compared against q
|
|
|
+ * @q: clk compared against p
|
|
|
+ *
|
|
|
+ * Returns true if the two struct clk pointers both point to the same hardware
|
|
|
+ * clock node. Put differently, returns true if struct clk *p and struct clk *q
|
|
|
+ * share the same struct clk_core object.
|
|
|
+ *
|
|
|
+ * Returns false otherwise. Note that two NULL clks are treated as matching.
|
|
|
+ */
|
|
|
+bool clk_is_match(const struct clk *p, const struct clk *q)
|
|
|
+{
|
|
|
+ /* trivial case: identical struct clk's or both NULL */
|
|
|
+ if (p == q)
|
|
|
+ return true;
|
|
|
|
|
|
- /* calculate new rates and get the topmost changed clock */
|
|
|
- top = clk_calc_new_rates(core, rate);
|
|
|
- if (!top)
|
|
|
- return -EINVAL;
|
|
|
+ /* true if clk->core pointers match. Avoid dereferencing garbage */
|
|
|
+ if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
|
|
|
+ if (p->core == q->core)
|
|
|
+ return true;
|
|
|
|
|
|
- /* notify that we are about to change rates */
|
|
|
- fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
|
|
|
- if (fail_clk) {
|
|
|
- pr_debug("%s: failed to set %s rate\n", __func__,
|
|
|
- fail_clk->name);
|
|
|
- clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
|
|
|
- return -EBUSY;
|
|
|
- }
|
|
|
+ return false;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(clk_is_match);
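Usage sketch: a driver that receives two handles which, on some boards, are wired to the same hardware clock can use clk_is_match() to skip redundant bookkeeping (the names are illustrative):

	static int foo_enable_clks(struct clk *core_clk, struct clk *bus_clk)
	{
		int ret = clk_prepare_enable(core_clk);

		if (ret)
			return ret;

		/* both handles share one clk_core: nothing more to enable */
		if (clk_is_match(core_clk, bus_clk))
			return 0;

		ret = clk_prepare_enable(bus_clk);
		if (ret)
			clk_disable_unprepare(core_clk);

		return ret;
	}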
|
|
|
|
|
|
- /* change the rates */
|
|
|
- clk_change_rate(top);
|
|
|
+/*** debugfs support ***/
|
|
|
|
|
|
- core->req_rate = req_rate;
|
|
|
+#ifdef CONFIG_DEBUG_FS
|
|
|
+#include <linux/debugfs.h>
|
|
|
|
|
|
- return ret;
|
|
|
-}
|
|
|
+static struct dentry *rootdir;
|
|
|
+static int inited = 0;
|
|
|
+static DEFINE_MUTEX(clk_debug_lock);
|
|
|
+static HLIST_HEAD(clk_debug_list);
|
|
|
|
|
|
-/**
|
|
|
- * clk_set_rate - specify a new rate for clk
|
|
|
- * @clk: the clk whose rate is being changed
|
|
|
- * @rate: the new rate for clk
|
|
|
- *
|
|
|
- * In the simplest case clk_set_rate will only adjust the rate of clk.
|
|
|
- *
|
|
|
- * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
|
|
|
- * propagate up to clk's parent; whether or not this happens depends on the
|
|
|
- * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
|
|
|
- * after calling .round_rate then upstream parent propagation is ignored. If
|
|
|
- * *parent_rate comes back with a new rate for clk's parent then we propagate
|
|
|
- * up to clk's parent and set its rate. Upward propagation will continue
|
|
|
- * until either a clk does not support the CLK_SET_RATE_PARENT flag or
|
|
|
- * .round_rate stops requesting changes to clk's parent_rate.
|
|
|
- *
|
|
|
- * Rate changes are accomplished via tree traversal that also recalculates the
|
|
|
- * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
|
|
|
- *
|
|
|
- * Returns 0 on success, -EERROR otherwise.
|
|
|
- */
|
|
|
-int clk_set_rate(struct clk *clk, unsigned long rate)
|
|
|
+static struct hlist_head *all_lists[] = {
|
|
|
+ &clk_root_list,
|
|
|
+ &clk_orphan_list,
|
|
|
+ NULL,
|
|
|
+};
|
|
|
+
|
|
|
+static struct hlist_head *orphan_list[] = {
|
|
|
+ &clk_orphan_list,
|
|
|
+ NULL,
|
|
|
+};
|
|
|
+
|
|
|
+static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
|
|
|
+ int level)
|
|
|
{
|
|
|
- int ret;
|
|
|
+ if (!c)
|
|
|
+ return;
|
|
|
|
|
|
- if (!clk)
|
|
|
- return 0;
|
|
|
+ seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
|
|
|
+ level * 3 + 1, "",
|
|
|
+ 30 - level * 3, c->name,
|
|
|
+ c->enable_count, c->prepare_count, clk_core_get_rate(c),
|
|
|
+ clk_core_get_accuracy(c), clk_core_get_phase(c));
|
|
|
+}
|
|
|
|
|
|
- /* prevent racing with updates to the clock topology */
|
|
|
- clk_prepare_lock();
|
|
|
+static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
|
|
|
+ int level)
|
|
|
+{
|
|
|
+ struct clk_core *child;
|
|
|
|
|
|
- ret = clk_core_set_rate_nolock(clk->core, rate);
|
|
|
+ if (!c)
|
|
|
+ return;
|
|
|
|
|
|
- clk_prepare_unlock();
|
|
|
+ clk_summary_show_one(s, c, level);
|
|
|
|
|
|
- return ret;
|
|
|
+ hlist_for_each_entry(child, &c->children, child_node)
|
|
|
+ clk_summary_show_subtree(s, child, level + 1);
|
|
|
}
|
|
|
-EXPORT_SYMBOL_GPL(clk_set_rate);
|
|
|
|
|
|
-/**
|
|
|
- * clk_set_rate_range - set a rate range for a clock source
|
|
|
- * @clk: clock source
|
|
|
- * @min: desired minimum clock rate in Hz, inclusive
|
|
|
- * @max: desired maximum clock rate in Hz, inclusive
|
|
|
- *
|
|
|
- * Returns success (0) or negative errno.
|
|
|
- */
|
|
|
-int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
|
|
|
+static int clk_summary_show(struct seq_file *s, void *data)
|
|
|
{
|
|
|
- int ret = 0;
|
|
|
-
|
|
|
- if (!clk)
|
|
|
- return 0;
|
|
|
+ struct clk_core *c;
|
|
|
+ struct hlist_head **lists = (struct hlist_head **)s->private;
|
|
|
|
|
|
- if (min > max) {
|
|
|
- pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
|
|
|
- __func__, clk->core->name, clk->dev_id, clk->con_id,
|
|
|
- min, max);
|
|
|
- return -EINVAL;
|
|
|
- }
|
|
|
+ seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n");
|
|
|
+ seq_puts(s, "----------------------------------------------------------------------------------------\n");
|
|
|
|
|
|
clk_prepare_lock();
|
|
|
|
|
|
- if (min != clk->min_rate || max != clk->max_rate) {
|
|
|
- clk->min_rate = min;
|
|
|
- clk->max_rate = max;
|
|
|
- ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
|
|
|
- }
|
|
|
+ for (; *lists; lists++)
|
|
|
+ hlist_for_each_entry(c, *lists, child_node)
|
|
|
+ clk_summary_show_subtree(s, c, 0);
|
|
|
|
|
|
clk_prepare_unlock();
|
|
|
|
|
|
- return ret;
|
|
|
+ return 0;
|
|
|
}
|
|
|
-EXPORT_SYMBOL_GPL(clk_set_rate_range);
|
|
|
-
|
|
|
-/**
|
|
|
- * clk_set_min_rate - set a minimum clock rate for a clock source
|
|
|
- * @clk: clock source
|
|
|
- * @rate: desired minimum clock rate in Hz, inclusive
|
|
|
- *
|
|
|
- * Returns success (0) or negative errno.
|
|
|
- */
|
|
|
-int clk_set_min_rate(struct clk *clk, unsigned long rate)
|
|
|
-{
|
|
|
- if (!clk)
|
|
|
- return 0;
|
|
|
|
|
|
- return clk_set_rate_range(clk, rate, clk->max_rate);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(clk_set_min_rate);
|
|
|
|
|
|
-/**
|
|
|
- * clk_set_max_rate - set a maximum clock rate for a clock source
|
|
|
- * @clk: clock source
|
|
|
- * @rate: desired maximum clock rate in Hz, inclusive
|
|
|
- *
|
|
|
- * Returns success (0) or negative errno.
|
|
|
- */
|
|
|
-int clk_set_max_rate(struct clk *clk, unsigned long rate)
|
|
|
+static int clk_summary_open(struct inode *inode, struct file *file)
|
|
|
{
|
|
|
- if (!clk)
|
|
|
- return 0;
|
|
|
-
|
|
|
- return clk_set_rate_range(clk, clk->min_rate, rate);
|
|
|
+ return single_open(file, clk_summary_show, inode->i_private);
|
|
|
}
|
|
|
-EXPORT_SYMBOL_GPL(clk_set_max_rate);
|
|
|
|
|
|
-/**
|
|
|
- * clk_get_parent - return the parent of a clk
|
|
|
- * @clk: the clk whose parent gets returned
|
|
|
- *
|
|
|
- * Simply returns clk->parent. Returns NULL if clk is NULL.
|
|
|
- */
|
|
|
-struct clk *clk_get_parent(struct clk *clk)
|
|
|
-{
|
|
|
- struct clk *parent;
|
|
|
+static const struct file_operations clk_summary_fops = {
|
|
|
+ .open = clk_summary_open,
|
|
|
+ .read = seq_read,
|
|
|
+ .llseek = seq_lseek,
|
|
|
+ .release = single_release,
|
|
|
+};
|
|
|
|
|
|
- clk_prepare_lock();
|
|
|
- parent = __clk_get_parent(clk);
|
|
|
- clk_prepare_unlock();
|
|
|
+static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
|
|
|
+{
|
|
|
+ if (!c)
|
|
|
+ return;
|
|
|
|
|
|
- return parent;
|
|
|
+ seq_printf(s, "\"%s\": { ", c->name);
|
|
|
+ seq_printf(s, "\"enable_count\": %d,", c->enable_count);
|
|
|
+ seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
|
|
|
+ seq_printf(s, "\"rate\": %lu", clk_core_get_rate(c));
|
|
|
+ seq_printf(s, "\"accuracy\": %lu", clk_core_get_accuracy(c));
|
|
|
+ seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
|
|
|
}
|
|
|
-EXPORT_SYMBOL_GPL(clk_get_parent);
|
|
|
|
|
|
-/*
|
|
|
- * .get_parent is mandatory for clocks with multiple possible parents. It is
|
|
|
- * optional for single-parent clocks. Always call .get_parent if it is
|
|
|
- * available and WARN if it is missing for multi-parent clocks.
|
|
|
- *
|
|
|
- * For single-parent clocks without .get_parent, first check to see if the
|
|
|
- * .parents array exists, and if so use it to avoid an expensive tree
|
|
|
- * traversal. If .parents does not exist then walk the tree.
|
|
|
- */
|
|
|
-static struct clk_core *__clk_init_parent(struct clk_core *core)
|
|
|
+static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
|
|
|
{
|
|
|
- struct clk_core *ret = NULL;
|
|
|
- u8 index;
|
|
|
+ struct clk_core *child;
|
|
|
|
|
|
- /* handle the trivial cases */
|
|
|
+ if (!c)
|
|
|
+ return;
|
|
|
|
|
|
- if (!core->num_parents)
|
|
|
- goto out;
|
|
|
+ clk_dump_one(s, c, level);
|
|
|
|
|
|
- if (core->num_parents == 1) {
|
|
|
- if (IS_ERR_OR_NULL(core->parent))
|
|
|
- core->parent = clk_core_lookup(core->parent_names[0]);
|
|
|
- ret = core->parent;
|
|
|
- goto out;
|
|
|
+ hlist_for_each_entry(child, &c->children, child_node) {
|
|
|
+ seq_printf(s, ",");
|
|
|
+ clk_dump_subtree(s, child, level + 1);
|
|
|
}
|
|
|
|
|
|
- if (!core->ops->get_parent) {
|
|
|
- WARN(!core->ops->get_parent,
|
|
|
- "%s: multi-parent clocks must implement .get_parent\n",
|
|
|
- __func__);
|
|
|
- goto out;
|
|
|
- };
|
|
|
-
|
|
|
- /*
|
|
|
- * Do our best to cache parent clocks in core->parents. This prevents
|
|
|
- * unnecessary and expensive lookups. We don't set core->parent here;
|
|
|
- * that is done by the calling function.
|
|
|
- */
|
|
|
-
|
|
|
- index = core->ops->get_parent(core->hw);
|
|
|
-
|
|
|
- if (!core->parents)
|
|
|
- core->parents =
|
|
|
- kcalloc(core->num_parents, sizeof(struct clk *),
|
|
|
- GFP_KERNEL);
|
|
|
-
|
|
|
- ret = clk_core_get_parent_by_index(core, index);
|
|
|
-
|
|
|
-out:
|
|
|
- return ret;
|
|
|
-}
|
|
|
-
|
|
|
-static void clk_core_reparent(struct clk_core *core,
|
|
|
- struct clk_core *new_parent)
|
|
|
-{
|
|
|
- clk_reparent(core, new_parent);
|
|
|
- __clk_recalc_accuracies(core);
|
|
|
- __clk_recalc_rates(core, POST_RATE_CHANGE);
|
|
|
+ seq_printf(s, "}");
|
|
|
}
|
|
|
|
|
|
-/**
|
|
|
- * clk_has_parent - check if a clock is a possible parent for another
|
|
|
- * @clk: clock source
|
|
|
- * @parent: parent clock source
|
|
|
- *
|
|
|
- * This function can be used in drivers that need to check that a clock can be
|
|
|
- * the parent of another without actually changing the parent.
|
|
|
- *
|
|
|
- * Returns true if @parent is a possible parent for @clk, false otherwise.
|
|
|
- */
|
|
|
-bool clk_has_parent(struct clk *clk, struct clk *parent)
|
|
|
+static int clk_dump(struct seq_file *s, void *data)
|
|
|
{
|
|
|
- struct clk_core *core, *parent_core;
|
|
|
- unsigned int i;
|
|
|
+ struct clk_core *c;
|
|
|
+ bool first_node = true;
|
|
|
+ struct hlist_head **lists = (struct hlist_head **)s->private;
|
|
|
|
|
|
- /* NULL clocks should be nops, so return success if either is NULL. */
|
|
|
- if (!clk || !parent)
|
|
|
- return true;
|
|
|
+ seq_printf(s, "{");
|
|
|
|
|
|
- core = clk->core;
|
|
|
- parent_core = parent->core;
|
|
|
+ clk_prepare_lock();
|
|
|
|
|
|
- /* Optimize for the case where the parent is already the parent. */
|
|
|
- if (core->parent == parent_core)
|
|
|
- return true;
|
|
|
+ for (; *lists; lists++) {
|
|
|
+ hlist_for_each_entry(c, *lists, child_node) {
|
|
|
+ if (!first_node)
|
|
|
+ seq_puts(s, ",");
|
|
|
+ first_node = false;
|
|
|
+ clk_dump_subtree(s, c, 0);
|
|
|
+ }
|
|
|
+ }
|
|
|
|
|
|
- for (i = 0; i < core->num_parents; i++)
|
|
|
- if (strcmp(core->parent_names[i], parent_core->name) == 0)
|
|
|
- return true;
|
|
|
+ clk_prepare_unlock();
|
|
|
|
|
|
- return false;
|
|
|
+ seq_printf(s, "}");
|
|
|
+ return 0;
|
|
|
}
|
|
|
-EXPORT_SYMBOL_GPL(clk_has_parent);
|
|
|
|
|
|
-static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
|
|
|
+
|
|
|
+static int clk_dump_open(struct inode *inode, struct file *file)
|
|
|
{
|
|
|
- int ret = 0;
|
|
|
- int p_index = 0;
|
|
|
- unsigned long p_rate = 0;
|
|
|
+ return single_open(file, clk_dump, inode->i_private);
|
|
|
+}
|
|
|
|
|
|
- if (!core)
|
|
|
- return 0;
|
|
|
+static const struct file_operations clk_dump_fops = {
|
|
|
+ .open = clk_dump_open,
|
|
|
+ .read = seq_read,
|
|
|
+ .llseek = seq_lseek,
|
|
|
+ .release = single_release,
|
|
|
+};
|
|
|
|
|
|
-	/* prevent racing with updates to the clock topology */
-	clk_prepare_lock();
+static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
+{
+	struct dentry *d;
+	int ret = -ENOMEM;

-	if (core->parent == parent)
+	if (!core || !pdentry) {
+		ret = -EINVAL;
 		goto out;
+	}

-	/* verify ops for for multi-parent clks */
-	if ((core->num_parents > 1) && (!core->ops->set_parent)) {
-		ret = -ENOSYS;
+	d = debugfs_create_dir(core->name, pdentry);
+	if (!d)
 		goto out;
-	}

-	/* check that we are allowed to re-parent if the clock is in use */
-	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
-		ret = -EBUSY;
-		goto out;
-	}
+	core->dentry = d;
+
+	d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry,
+			       (u32 *)&core->rate);
+	if (!d)
+		goto err_out;
+
+	d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry,
+			       (u32 *)&core->accuracy);
+	if (!d)
+		goto err_out;
+
+	d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry,
+			       (u32 *)&core->phase);
+	if (!d)
+		goto err_out;

-	/* try finding the new parent index */
-	if (parent) {
-		p_index = clk_fetch_parent_index(core, parent);
-		p_rate = parent->rate;
-		if (p_index < 0) {
-			pr_debug("%s: clk %s can not be parent of clk %s\n",
-					__func__, parent->name, core->name);
-			ret = p_index;
-			goto out;
-		}
-	}
+	d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry,
+			       (u32 *)&core->flags);
+	if (!d)
+		goto err_out;

-	/* propagate PRE_RATE_CHANGE notifications */
-	ret = __clk_speculate_rates(core, p_rate);
+	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry,
+			       (u32 *)&core->prepare_count);
+	if (!d)
+		goto err_out;

-	/* abort if a driver objects */
-	if (ret & NOTIFY_STOP_MASK)
-		goto out;
+	d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry,
+			       (u32 *)&core->enable_count);
+	if (!d)
+		goto err_out;

-	/* do the re-parent */
-	ret = __clk_set_parent(core, parent, p_index);
+	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry,
+			       (u32 *)&core->notifier_count);
+	if (!d)
+		goto err_out;

-	/* propagate rate an accuracy recalculation accordingly */
-	if (ret) {
-		__clk_recalc_rates(core, ABORT_RATE_CHANGE);
-	} else {
-		__clk_recalc_rates(core, POST_RATE_CHANGE);
-		__clk_recalc_accuracies(core);
+	if (core->ops->debug_init) {
+		ret = core->ops->debug_init(core->hw, core->dentry);
+		if (ret)
+			goto err_out;
 	}

-out:
-	clk_prepare_unlock();
+	ret = 0;
+	goto out;

+err_out:
+	debugfs_remove_recursive(core->dentry);
+	core->dentry = NULL;
+out:
 	return ret;
 }

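clk_debug_create_one() leans on two properties of the debugfs API as it looked at the time of this patch: debugfs_create_u32()/_x32() expose a u32 in place (returning a dentry, NULL on failure), and a single debugfs_remove_recursive() on the directory unwinds every file created under it, so all failure paths can share one err_out label. A stripped-down sketch of that create-and-unwind shape (the demo_* names are hypothetical):

#include <linux/debugfs.h>

static u32 demo_rate;		/* value is read directly by debugfs */
static struct dentry *demo_dir;

static int demo_debugfs_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	if (!demo_dir)
		return -ENOMEM;

	if (!debugfs_create_u32("rate", S_IRUGO, demo_dir, &demo_rate))
		goto err_out;

	return 0;

err_out:
	/* removes demo_dir and anything created under it */
	debugfs_remove_recursive(demo_dir);
	demo_dir = NULL;
	return -ENOMEM;
}
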
 /**
- * clk_set_parent - switch the parent of a mux clk
- * @clk: the mux clk whose input we are switching
- * @parent: the new input to clk
- *
- * Re-parent clk to use parent as its new input source. If clk is in
- * prepared state, the clk will get enabled for the duration of this call. If
- * that's not acceptable for a specific clk (Eg: the consumer can't handle
- * that, the reparenting is glitchy in hardware, etc), use the
- * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
- *
- * After successfully changing clk's parent clk_set_parent will update the
- * clk topology, sysfs topology and propagate rate recalculation via
- * __clk_recalc_rates.
+ * clk_debug_register - add a clk node to the debugfs clk tree
+ * @core: the clk being added to the debugfs clk tree
  *
- * Returns 0 on success, -EERROR otherwise.
+ * Dynamically adds a clk to the debugfs clk tree if debugfs has been
+ * initialized. Otherwise it bails out early since the debugfs clk tree
+ * will be created lazily by clk_debug_init as part of a late_initcall.
  */
-int clk_set_parent(struct clk *clk, struct clk *parent)
+static int clk_debug_register(struct clk_core *core)
 {
-	if (!clk)
-		return 0;
+	int ret = 0;

-	return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
+	mutex_lock(&clk_debug_lock);
+	hlist_add_head(&core->debug_node, &clk_debug_list);
+
+	if (!inited)
+		goto unlock;
+
+	ret = clk_debug_create_one(core, rootdir);
+unlock:
+	mutex_unlock(&clk_debug_lock);
+
+	return ret;
 }
-EXPORT_SYMBOL_GPL(clk_set_parent);

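The register path above is a common early-boot idiom: callers may arrive before debugfs exists, so clk_debug_register() always parks the clk on clk_debug_list under clk_debug_lock, and only instantiates the directory once inited has been set by clk_debug_init(). The same park-then-replay pattern in isolation, under invented demo_* names:

#include <linux/init.h>
#include <linux/list.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static LIST_HEAD(demo_list);
static bool demo_inited;

struct demo_node {
	struct list_head node;
};

static void demo_instantiate(struct demo_node *dn)
{
	/* create whatever backing state needs late-boot services */
}

static void demo_register(struct demo_node *dn)
{
	mutex_lock(&demo_lock);
	list_add(&dn->node, &demo_list);
	if (demo_inited)
		demo_instantiate(dn);	/* late arrival: create now */
	mutex_unlock(&demo_lock);
}

static int __init demo_init(void)
{
	struct demo_node *dn;

	mutex_lock(&demo_lock);
	list_for_each_entry(dn, &demo_list, node)
		demo_instantiate(dn);	/* replay early registrations */
	demo_inited = true;
	mutex_unlock(&demo_lock);

	return 0;
}
late_initcall(demo_init);
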
-/**
- * clk_set_phase - adjust the phase shift of a clock signal
- * @clk: clock signal source
- * @degrees: number of degrees the signal is shifted
- *
- * Shifts the phase of a clock signal by the specified
- * degrees. Returns 0 on success, -EERROR otherwise.
- *
- * This function makes no distinction about the input or reference
- * signal that we adjust the clock signal phase against. For example
- * phase locked-loop clock signal generators we may shift phase with
- * respect to feedback clock signal input, but for other cases the
- * clock phase may be shifted with respect to some other, unspecified
- * signal.
+/**
+ * clk_debug_unregister - remove a clk node from the debugfs clk tree
+ * @core: the clk being removed from the debugfs clk tree
  *
- * Additionally the concept of phase shift does not propagate through
- * the clock tree hierarchy, which sets it apart from clock rates and
- * clock accuracy. A parent clock phase attribute does not have an
- * impact on the phase attribute of a child clock.
+ * Dynamically removes a clk and all its child clk nodes from the
+ * debugfs clk tree if clk->dentry points to debugfs created by
+ * clk_debug_register in __clk_init.
  */
-int clk_set_phase(struct clk *clk, int degrees)
+static void clk_debug_unregister(struct clk_core *core)
 {
-	int ret = -EINVAL;
+	mutex_lock(&clk_debug_lock);
+	hlist_del_init(&core->debug_node);
+	debugfs_remove_recursive(core->dentry);
+	core->dentry = NULL;
+	mutex_unlock(&clk_debug_lock);
+}

-	if (!clk)
-		return 0;
+struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
+				void *data, const struct file_operations *fops)
+{
+	struct dentry *d = NULL;

-	/* sanity check degrees */
-	degrees %= 360;
-	if (degrees < 0)
-		degrees += 360;
+	if (hw->core->dentry)
+		d = debugfs_create_file(name, mode, hw->core->dentry, data,
+					fops);

-	clk_prepare_lock();
+	return d;
+}
+EXPORT_SYMBOL_GPL(clk_debugfs_add_file);

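clk_debugfs_add_file() lets a clk provider hang extra files under its clk's debugfs directory, and quietly returns NULL when that directory does not exist. One plausible way a driver might use it from a .debug_init callback, which clk_debug_create_one() above invokes after the standard files are created; my_regs_fops and the "regs" file are assumptions, not something clk.c defines:

/* Hypothetical provider hook; "regs" and my_regs_fops are invented. */
static int my_clk_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	if (!clk_debugfs_add_file(hw, "regs", S_IRUGO, hw, &my_regs_fops))
		return -ENOMEM;

	return 0;
}
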
-	trace_clk_set_phase(clk->core, degrees);
+/**
+ * clk_debug_init - lazily create the debugfs clk tree visualization
+ *
+ * clks are often initialized very early during boot before memory can
+ * be dynamically allocated and well before debugfs is setup.
+ * clk_debug_init walks the clk tree hierarchy while holding
+ * prepare_lock and creates the topology as part of a late_initcall,
+ * thus ensuring that clks initialized very early will still be
+ * represented in the debugfs clk tree. This function should only be
+ * called once at boot-time, and all other clks added dynamically will
+ * be done so with clk_debug_register.
+ */
+static int __init clk_debug_init(void)
+{
+	struct clk_core *core;
+	struct dentry *d;

-	if (clk->core->ops->set_phase)
-		ret = clk->core->ops->set_phase(clk->core->hw, degrees);
+	rootdir = debugfs_create_dir("clk", NULL);

-	trace_clk_set_phase_complete(clk->core, degrees);
+	if (!rootdir)
+		return -ENOMEM;

-	if (!ret)
-		clk->core->phase = degrees;
+	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
+				&clk_summary_fops);
+	if (!d)
+		return -ENOMEM;

-	clk_prepare_unlock();
+	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
+				&clk_dump_fops);
+	if (!d)
+		return -ENOMEM;

-	return ret;
-}
-EXPORT_SYMBOL_GPL(clk_set_phase);
+	d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
+				&orphan_list, &clk_summary_fops);
+	if (!d)
+		return -ENOMEM;

-static int clk_core_get_phase(struct clk_core *core)
-{
-	int ret;
+	d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
+				&orphan_list, &clk_dump_fops);
+	if (!d)
+		return -ENOMEM;

-	clk_prepare_lock();
-	ret = core->phase;
-	clk_prepare_unlock();
+	mutex_lock(&clk_debug_lock);
+	hlist_for_each_entry(core, &clk_debug_list, debug_node)
+		clk_debug_create_one(core, rootdir);

-	return ret;
-}
-EXPORT_SYMBOL_GPL(clk_get_phase);
+	inited = 1;
+	mutex_unlock(&clk_debug_lock);

-/**
- * clk_get_phase - return the phase shift of a clock signal
- * @clk: clock signal source
- *
- * Returns the phase shift of a clock node in degrees, otherwise returns
- * -EERROR.
- */
-int clk_get_phase(struct clk *clk)
+	return 0;
+}
+late_initcall(clk_debug_init);
+#else
+static inline int clk_debug_register(struct clk_core *core) { return 0; }
+static inline void clk_debug_reparent(struct clk_core *core,
+				      struct clk_core *new_parent)
 {
-	if (!clk)
-		return 0;
-
-	return clk_core_get_phase(clk->core);
 }
-
-/**
- * clk_is_match - check if two clk's point to the same hardware clock
- * @p: clk compared against q
- * @q: clk compared against p
- *
- * Returns true if the two struct clk pointers both point to the same hardware
- * clock node. Put differently, returns true if struct clk *p and struct clk *q
- * share the same struct clk_core object.
- *
- * Returns false otherwise. Note that two NULL clks are treated as matching.
- */
-bool clk_is_match(const struct clk *p, const struct clk *q)
+static inline void clk_debug_unregister(struct clk_core *core)
 {
-	/* trivial case: identical struct clk's or both NULL */
-	if (p == q)
-		return true;
-
-	/* true if clk->core pointers match. Avoid derefing garbage */
-	if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
-		if (p->core == q->core)
-			return true;
-
-	return false;
 }
-EXPORT_SYMBOL_GPL(clk_is_match);
+#endif

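The #else branch above is the usual config-stub pattern: when CONFIG_DEBUG_FS is off, static inline no-ops keep the call sites in the registration path unconditional and let the compiler discard the dead code. The shape in miniature, with a hypothetical CONFIG_DEMO and demo_register:

#ifdef CONFIG_DEMO
int demo_register(struct demo_node *dn);	/* real implementation */
#else
static inline int demo_register(struct demo_node *dn) { return 0; }
#endif
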
 /**
  * __clk_init - initialize the data structures in a struct clk
|