clock.c

/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 * Copyright (C) 2005 - 2009  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <asm/clock.h>
#include <asm/timer.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);
/*
 * Each subtype is expected to define the init routines for these clocks,
 * as each subtype (or processor family) will have these clocks at the
 * very least.  These are all provided through the CPG, which even some of
 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
 *
 * The processor-specific code is expected to register any additional
 * clock sources that are of interest.
 */
static struct clk master_clk = {
        .name           = "master_clk",
        .flags          = CLK_ENABLE_ON_INIT,
        .rate           = CONFIG_SH_PCLK_FREQ,
};

static struct clk module_clk = {
        .name           = "module_clk",
        .parent         = &master_clk,
        .flags          = CLK_ENABLE_ON_INIT,
};

static struct clk bus_clk = {
        .name           = "bus_clk",
        .parent         = &master_clk,
        .flags          = CLK_ENABLE_ON_INIT,
};

static struct clk cpu_clk = {
        .name           = "cpu_clk",
        .parent         = &master_clk,
        .flags          = CLK_ENABLE_ON_INIT,
};

/*
 * The ordering of these clocks matters, do not change it.
 */
static struct clk *onchip_clocks[] = {
        &master_clk,
        &module_clk,
        &bus_clk,
        &cpu_clk,
};
/* Used for clocks that always have same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
        return clk->parent->rate;
}
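
/*
 * Move a clock to a new parent: unlink it from its current sibling list,
 * add it to the new parent's children, and update the parent pointer.
 */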
int clk_reparent(struct clk *child, struct clk *parent)
{
        list_del_init(&child->sibling);
        if (parent)
                list_add(&child->sibling, &parent->children);
        child->parent = parent;

        /* now do the debugfs renaming to reattach the child
           to the proper parent */

        return 0;
}
/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &tclk->children, sibling) {
                if (clkp->ops->recalc)
                        clkp->rate = clkp->ops->recalc(clkp);
                propagate_rate(clkp);
        }
}
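
/*
 * Drop a reference on the clock; when the use count reaches zero the
 * clock is turned off and the release is propagated up to its parent.
 * Must be called with clock_lock held.
 */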
static void __clk_disable(struct clk *clk)
{
        if (clk->usecount == 0) {
                printk(KERN_ERR "Trying to disable clock %s with 0 usecount\n",
                       clk->name);
                WARN_ON(1);
                return;
        }

        if (!(--clk->usecount)) {
                if (likely(clk->ops && clk->ops->disable))
                        clk->ops->disable(clk);
                if (likely(clk->parent))
                        __clk_disable(clk->parent);
        }
}
void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (!clk)
                return;

        spin_lock_irqsave(&clock_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
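
/*
 * Take a reference on the clock; the first user enables the parent
 * (recursively) and then the clock itself.  On failure the use count
 * is rolled back.  Must be called with clock_lock held.
 */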
static int __clk_enable(struct clk *clk)
{
        int ret = 0;

        if (clk->usecount++ == 0) {
                if (clk->parent) {
                        ret = __clk_enable(clk->parent);
                        if (unlikely(ret))
                                goto err;
                }

                if (clk->ops && clk->ops->enable) {
                        ret = clk->ops->enable(clk);
                        if (ret) {
                                if (clk->parent)
                                        __clk_disable(clk->parent);
                                goto err;
                        }
                }
        }

        return ret;
err:
        clk->usecount--;
        return ret;
}
int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret;

        if (!clk)
                return -EINVAL;

        spin_lock_irqsave(&clock_lock, flags);
        ret = __clk_enable(clk);
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
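
/* Clocks registered without a parent; rate propagation starts from here */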
static LIST_HEAD(root_clks);
/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &root_clks, sibling) {
                if (clkp->ops->recalc)
                        clkp->rate = clkp->ops->recalc(clkp);
                propagate_rate(clkp);
        }
}
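
/*
 * Add a clock to the framework: hook it into the clock tree (under its
 * parent, or into root_clks if it has none), add it to the global list
 * and run its optional ops->init() hook.  Clocks that are already
 * registered are left untouched and reported as success.
 */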
int clk_register(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        /*
         * trap out already registered clocks
         */
        if (clk->node.next || clk->node.prev)
                return 0;

        mutex_lock(&clock_list_sem);

        INIT_LIST_HEAD(&clk->children);
        clk->usecount = 0;

        if (clk->parent)
                list_add(&clk->sibling, &clk->parent->children);
        else
                list_add(&clk->sibling, &root_clks);

        list_add(&clk->node, &clock_list);
        if (clk->ops->init)
                clk->ops->init(clk);
        mutex_unlock(&clock_list_sem);

        return 0;
}
EXPORT_SYMBOL_GPL(clk_register);
void clk_unregister(struct clk *clk)
{
        mutex_lock(&clock_list_sem);
        list_del(&clk->sibling);
        list_del(&clk->node);
        mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);
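
/* Enable every registered clock that is flagged CLK_ENABLE_ON_INIT */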
static void clk_enable_init_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clock_list, node)
                if (clkp->flags & CLK_ENABLE_ON_INIT)
                        clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
        return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
int clk_set_rate(struct clk *clk, unsigned long rate)
{
        return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
        int ret = -EOPNOTSUPP;

        if (likely(clk->ops && clk->ops->set_rate)) {
                unsigned long flags;

                spin_lock_irqsave(&clock_lock, flags);
                ret = clk->ops->set_rate(clk, rate, algo_id);
                if (ret == 0) {
                        if (clk->ops->recalc)
                                clk->rate = clk->ops->recalc(clk);
                        propagate_rate(clk);
                }
                spin_unlock_irqrestore(&clock_lock, flags);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);
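
/* Re-read the hardware rate of a clock and push the result down the tree */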
void clk_recalc_rate(struct clk *clk)
{
        unsigned long flags;

        if (!clk->ops->recalc)
                return;

        spin_lock_irqsave(&clock_lock, flags);
        clk->rate = clk->ops->recalc(clk);
        propagate_rate(clk);
        spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_recalc_rate);
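
/*
 * Switch a clock to a different parent.  This is only permitted while the
 * clock is unused (usecount == 0); otherwise -EBUSY is returned.
 */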
int clk_set_parent(struct clk *clk, struct clk *parent)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (!parent || !clk)
                return ret;
        if (clk->parent == parent)
                return 0;

        spin_lock_irqsave(&clock_lock, flags);
        if (clk->usecount == 0) {
                if (clk->ops->set_parent)
                        ret = clk->ops->set_parent(clk, parent);
                else
                        ret = clk_reparent(clk, parent);

                if (ret == 0) {
                        pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
                                 clk->name, clk->parent->name, clk->rate);
                        if (clk->ops->recalc)
                                clk->rate = clk->ops->recalc(clk);
                        propagate_rate(clk);
                }
        } else
                ret = -EBUSY;
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
        return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
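
/*
 * Ask the clock which rate it would actually run at for a requested rate.
 * Clocks without a round_rate operation simply report their current rate.
 */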
long clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (likely(clk->ops && clk->ops->round_rate)) {
                unsigned long flags, rounded;

                spin_lock_irqsave(&clock_lock, flags);
                rounded = clk->ops->round_rate(clk, rate);
                spin_unlock_irqrestore(&clock_lock, flags);

                return rounded;
        }

        return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);
/*
 * Returns a clock. Note that we first try to use device id on the bus
 * and clock name. If this fails, we try to use clock name only.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
        struct clk *p, *clk = ERR_PTR(-ENOENT);
        int idno;

        if (dev == NULL || dev->bus != &platform_bus_type)
                idno = -1;
        else
                idno = to_platform_device(dev)->id;

        mutex_lock(&clock_list_sem);
        list_for_each_entry(p, &clock_list, node) {
                if (p->id == idno &&
                    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
                        clk = p;
                        goto found;
                }
        }

        list_for_each_entry(p, &clock_list, node) {
                if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
                        clk = p;
                        break;
                }
        }

found:
        mutex_unlock(&clock_list_sem);

        return clk;
}
EXPORT_SYMBOL_GPL(clk_get);
void clk_put(struct clk *clk)
{
        if (clk && !IS_ERR(clk))
                module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);
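
/*
 * Illustrative driver-side use of the lookup/enable API above.  This is a
 * sketch only, not part of this file; the device pointer and clock name
 * are made-up examples:
 *
 *	struct clk *clk = clk_get(&pdev->dev, "module_clk");
 *	if (!IS_ERR(clk)) {
 *		clk_enable(clk);
 *		pr_info("running at %lu Hz\n", clk_get_rate(clk));
 *		clk_disable(clk);
 *		clk_put(clk);
 *	}
 */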
void __init __attribute__ ((weak))
arch_init_clk_ops(struct clk_ops **ops, int type)
{
}

int __init __attribute__ ((weak))
arch_clk_init(void)
{
        return 0;
}
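
/* /proc read handler: dump name, rate and state of every registered clock */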
static int show_clocks(char *buf, char **start, off_t off,
                       int len, int *eof, void *data)
{
        struct clk *clk;
        char *p = buf;

        list_for_each_entry_reverse(clk, &clock_list, node) {
                unsigned long rate = clk_get_rate(clk);

                p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
                             rate / 1000000, (rate % 1000000) / 10000,
                             (clk->usecount > 0) ? "enabled" : "disabled");
        }

        return p - buf;
}
#ifdef CONFIG_PM
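/*
 * On resume from hibernation (PM_EVENT_ON following a FREEZE) the clock
 * hardware has lost its state, so reprogram each clock's parent and rate
 * from the cached software values.
 */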
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
        static pm_message_t prev_state;
        struct clk *clkp;

        switch (state.event) {
        case PM_EVENT_ON:
                /* Resuming from hibernation */
                if (prev_state.event != PM_EVENT_FREEZE)
                        break;

                list_for_each_entry(clkp, &clock_list, node) {
                        if (likely(clkp->ops)) {
                                unsigned long rate = clkp->rate;

                                if (likely(clkp->ops->set_parent))
                                        clkp->ops->set_parent(clkp,
                                                clkp->parent);
                                if (likely(clkp->ops->set_rate))
                                        clkp->ops->set_rate(clkp,
                                                rate, NO_CHANGE);
                                else if (likely(clkp->ops->recalc))
                                        clkp->rate = clkp->ops->recalc(clkp);
                        }
                }
                break;
        case PM_EVENT_FREEZE:
                break;
        case PM_EVENT_SUSPEND:
                break;
        }

        prev_state = state;
        return 0;
}
static int clks_sysdev_resume(struct sys_device *dev)
{
        return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
        .name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
        .suspend = clks_sysdev_suspend,
        .resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
        .cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
        sysdev_class_register(&clks_sysdev_class);
        sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
        sysdev_register(&clks_sysdev_dev);

        return 0;
}
subsys_initcall(clk_sysdev_init);
#endif
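
/*
 * Register the core CPG clocks, letting the CPU-specific code supply their
 * ops, run arch_clk_init() for any extra clocks, then recalculate all rates
 * and switch on the CLK_ENABLE_ON_INIT set.
 */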
int __init clk_init(void)
{
        int i, ret = 0;

        BUG_ON(!master_clk.rate);

        for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
                struct clk *clk = onchip_clocks[i];

                arch_init_clk_ops(&clk->ops, i);
                ret |= clk_register(clk);
        }

        ret |= arch_clk_init();

        /* Kick the child clocks.. */
        recalculate_root_clocks();

        /* Enable the necessary init clocks */
        clk_enable_init_clocks();

        return ret;
}
static int __init clk_proc_init(void)
{
        struct proc_dir_entry *p;

        p = create_proc_read_entry("clocks", S_IRUSR, NULL,
                                   show_clocks, NULL);
        if (unlikely(!p))
                return -EINVAL;

        return 0;
}
subsys_initcall(clk_proc_init);