clock.c 16 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745
/*
 * Clock and PLL control for DaVinci devices
 *
 * Copyright (C) 2006-2007 Texas Instruments.
 * Copyright (C) 2008-2009 Deep Root Systems, LLC
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
  12. #include <linux/module.h>
  13. #include <linux/kernel.h>
  14. #include <linux/init.h>
  15. #include <linux/errno.h>
  16. #include <linux/clk.h>
  17. #include <linux/err.h>
  18. #include <linux/mutex.h>
  19. #include <linux/io.h>
  20. #include <linux/delay.h>
  21. #include <mach/hardware.h>
  22. #include <mach/clock.h>
  23. #include "psc.h"
  24. #include <mach/cputype.h>
  25. #include "clock.h"
/* All clocks registered via clk_register(), linked through clk->node. */
static LIST_HEAD(clocks);
/* Serializes clock list and parent/child tree manipulation. */
static DEFINE_MUTEX(clocks_mutex);
/* Protects usecounts and PSC/PLL hardware accesses (IRQ-safe). */
static DEFINE_SPINLOCK(clockfw_lock);
  29. void davinci_clk_enable(struct clk *clk)
  30. {
  31. if (clk->parent)
  32. davinci_clk_enable(clk->parent);
  33. if (clk->usecount++ == 0) {
  34. if (clk->flags & CLK_PSC)
  35. davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
  36. true, clk->flags);
  37. else if (clk->clk_enable)
  38. clk->clk_enable(clk);
  39. }
  40. }
  41. void davinci_clk_disable(struct clk *clk)
  42. {
  43. if (WARN_ON(clk->usecount == 0))
  44. return;
  45. if (--clk->usecount == 0) {
  46. if (!(clk->flags & CLK_PLL) && (clk->flags & CLK_PSC))
  47. davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
  48. false, clk->flags);
  49. else if (clk->clk_disable)
  50. clk->clk_disable(clk);
  51. }
  52. if (clk->parent)
  53. davinci_clk_disable(clk->parent);
  54. }
  55. int davinci_clk_reset(struct clk *clk, bool reset)
  56. {
  57. unsigned long flags;
  58. if (clk == NULL || IS_ERR(clk))
  59. return -EINVAL;
  60. spin_lock_irqsave(&clockfw_lock, flags);
  61. if (clk->flags & CLK_PSC)
  62. davinci_psc_reset(clk->gpsc, clk->lpsc, reset);
  63. spin_unlock_irqrestore(&clockfw_lock, flags);
  64. return 0;
  65. }
  66. EXPORT_SYMBOL(davinci_clk_reset);
  67. int davinci_clk_reset_assert(struct clk *clk)
  68. {
  69. if (clk == NULL || IS_ERR(clk) || !clk->reset)
  70. return -EINVAL;
  71. return clk->reset(clk, true);
  72. }
  73. EXPORT_SYMBOL(davinci_clk_reset_assert);
  74. int davinci_clk_reset_deassert(struct clk *clk)
  75. {
  76. if (clk == NULL || IS_ERR(clk) || !clk->reset)
  77. return -EINVAL;
  78. return clk->reset(clk, false);
  79. }
  80. EXPORT_SYMBOL(davinci_clk_reset_deassert);
  81. int clk_enable(struct clk *clk)
  82. {
  83. unsigned long flags;
  84. if (!clk)
  85. return 0;
  86. else if (IS_ERR(clk))
  87. return -EINVAL;
  88. spin_lock_irqsave(&clockfw_lock, flags);
  89. davinci_clk_enable(clk);
  90. spin_unlock_irqrestore(&clockfw_lock, flags);
  91. return 0;
  92. }
  93. EXPORT_SYMBOL(clk_enable);
  94. void clk_disable(struct clk *clk)
  95. {
  96. unsigned long flags;
  97. if (clk == NULL || IS_ERR(clk))
  98. return;
  99. spin_lock_irqsave(&clockfw_lock, flags);
  100. davinci_clk_disable(clk);
  101. spin_unlock_irqrestore(&clockfw_lock, flags);
  102. }
  103. EXPORT_SYMBOL(clk_disable);
  104. unsigned long clk_get_rate(struct clk *clk)
  105. {
  106. if (clk == NULL || IS_ERR(clk))
  107. return 0;
  108. return clk->rate;
  109. }
  110. EXPORT_SYMBOL(clk_get_rate);
  111. long clk_round_rate(struct clk *clk, unsigned long rate)
  112. {
  113. if (clk == NULL || IS_ERR(clk))
  114. return 0;
  115. if (clk->round_rate)
  116. return clk->round_rate(clk, rate);
  117. return clk->rate;
  118. }
  119. EXPORT_SYMBOL(clk_round_rate);
  120. /* Propagate rate to children */
  121. static void propagate_rate(struct clk *root)
  122. {
  123. struct clk *clk;
  124. list_for_each_entry(clk, &root->children, childnode) {
  125. if (clk->recalc)
  126. clk->rate = clk->recalc(clk);
  127. propagate_rate(clk);
  128. }
  129. }
  130. int clk_set_rate(struct clk *clk, unsigned long rate)
  131. {
  132. unsigned long flags;
  133. int ret = -EINVAL;
  134. if (!clk)
  135. return 0;
  136. else if (IS_ERR(clk))
  137. return -EINVAL;
  138. if (clk->set_rate)
  139. ret = clk->set_rate(clk, rate);
  140. spin_lock_irqsave(&clockfw_lock, flags);
  141. if (ret == 0) {
  142. if (clk->recalc)
  143. clk->rate = clk->recalc(clk);
  144. propagate_rate(clk);
  145. }
  146. spin_unlock_irqrestore(&clockfw_lock, flags);
  147. return ret;
  148. }
  149. EXPORT_SYMBOL(clk_set_rate);
  150. int clk_set_parent(struct clk *clk, struct clk *parent)
  151. {
  152. unsigned long flags;
  153. if (!clk)
  154. return 0;
  155. else if (IS_ERR(clk))
  156. return -EINVAL;
  157. /* Cannot change parent on enabled clock */
  158. if (WARN_ON(clk->usecount))
  159. return -EINVAL;
  160. mutex_lock(&clocks_mutex);
  161. if (clk->set_parent) {
  162. int ret = clk->set_parent(clk, parent);
  163. if (ret) {
  164. mutex_unlock(&clocks_mutex);
  165. return ret;
  166. }
  167. }
  168. clk->parent = parent;
  169. list_del_init(&clk->childnode);
  170. list_add(&clk->childnode, &clk->parent->children);
  171. mutex_unlock(&clocks_mutex);
  172. spin_lock_irqsave(&clockfw_lock, flags);
  173. if (clk->recalc)
  174. clk->rate = clk->recalc(clk);
  175. propagate_rate(clk);
  176. spin_unlock_irqrestore(&clockfw_lock, flags);
  177. return 0;
  178. }
  179. EXPORT_SYMBOL(clk_set_parent);
  180. struct clk *clk_get_parent(struct clk *clk)
  181. {
  182. if (!clk)
  183. return NULL;
  184. return clk->parent;
  185. }
  186. EXPORT_SYMBOL(clk_get_parent);
  187. int clk_register(struct clk *clk)
  188. {
  189. if (clk == NULL || IS_ERR(clk))
  190. return -EINVAL;
  191. if (WARN(clk->parent && !clk->parent->rate,
  192. "CLK: %s parent %s has no rate!\n",
  193. clk->name, clk->parent->name))
  194. return -EINVAL;
  195. INIT_LIST_HEAD(&clk->children);
  196. mutex_lock(&clocks_mutex);
  197. list_add_tail(&clk->node, &clocks);
  198. if (clk->parent) {
  199. if (clk->set_parent) {
  200. int ret = clk->set_parent(clk, clk->parent);
  201. if (ret) {
  202. mutex_unlock(&clocks_mutex);
  203. return ret;
  204. }
  205. }
  206. list_add_tail(&clk->childnode, &clk->parent->children);
  207. }
  208. mutex_unlock(&clocks_mutex);
  209. /* If rate is already set, use it */
  210. if (clk->rate)
  211. return 0;
  212. /* Else, see if there is a way to calculate it */
  213. if (clk->recalc)
  214. clk->rate = clk->recalc(clk);
  215. /* Otherwise, default to parent rate */
  216. else if (clk->parent)
  217. clk->rate = clk->parent->rate;
  218. return 0;
  219. }
  220. EXPORT_SYMBOL(clk_register);
  221. void clk_unregister(struct clk *clk)
  222. {
  223. if (clk == NULL || IS_ERR(clk))
  224. return;
  225. mutex_lock(&clocks_mutex);
  226. list_del(&clk->node);
  227. list_del(&clk->childnode);
  228. mutex_unlock(&clocks_mutex);
  229. }
  230. EXPORT_SYMBOL(clk_unregister);
  231. #ifdef CONFIG_DAVINCI_RESET_CLOCKS
  232. /*
  233. * Disable any unused clocks left on by the bootloader
  234. */
  235. int __init davinci_clk_disable_unused(void)
  236. {
  237. struct clk *ck;
  238. spin_lock_irq(&clockfw_lock);
  239. list_for_each_entry(ck, &clocks, node) {
  240. if (ck->usecount > 0)
  241. continue;
  242. if (!(ck->flags & CLK_PSC))
  243. continue;
  244. /* ignore if in Disabled or SwRstDisable states */
  245. if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
  246. continue;
  247. pr_debug("Clocks: disable unused %s\n", ck->name);
  248. davinci_psc_config(ck->domain, ck->gpsc, ck->lpsc,
  249. false, ck->flags);
  250. }
  251. spin_unlock_irq(&clockfw_lock);
  252. return 0;
  253. }
  254. #endif
  255. static unsigned long clk_sysclk_recalc(struct clk *clk)
  256. {
  257. u32 v, plldiv;
  258. struct pll_data *pll;
  259. unsigned long rate = clk->rate;
  260. /* If this is the PLL base clock, no more calculations needed */
  261. if (clk->pll_data)
  262. return rate;
  263. if (WARN_ON(!clk->parent))
  264. return rate;
  265. rate = clk->parent->rate;
  266. /* Otherwise, the parent must be a PLL */
  267. if (WARN_ON(!clk->parent->pll_data))
  268. return rate;
  269. pll = clk->parent->pll_data;
  270. /* If pre-PLL, source clock is before the multiplier and divider(s) */
  271. if (clk->flags & PRE_PLL)
  272. rate = pll->input_rate;
  273. if (!clk->div_reg)
  274. return rate;
  275. v = __raw_readl(pll->base + clk->div_reg);
  276. if (v & PLLDIV_EN) {
  277. plldiv = (v & pll->div_ratio_mask) + 1;
  278. if (plldiv)
  279. rate /= plldiv;
  280. }
  281. return rate;
  282. }
  283. int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate)
  284. {
  285. unsigned v;
  286. struct pll_data *pll;
  287. unsigned long input;
  288. unsigned ratio = 0;
  289. /* If this is the PLL base clock, wrong function to call */
  290. if (clk->pll_data)
  291. return -EINVAL;
  292. /* There must be a parent... */
  293. if (WARN_ON(!clk->parent))
  294. return -EINVAL;
  295. /* ... the parent must be a PLL... */
  296. if (WARN_ON(!clk->parent->pll_data))
  297. return -EINVAL;
  298. /* ... and this clock must have a divider. */
  299. if (WARN_ON(!clk->div_reg))
  300. return -EINVAL;
  301. pll = clk->parent->pll_data;
  302. input = clk->parent->rate;
  303. /* If pre-PLL, source clock is before the multiplier and divider(s) */
  304. if (clk->flags & PRE_PLL)
  305. input = pll->input_rate;
  306. if (input > rate) {
  307. /*
  308. * Can afford to provide an output little higher than requested
  309. * only if maximum rate supported by hardware on this sysclk
  310. * is known.
  311. */
  312. if (clk->maxrate) {
  313. ratio = DIV_ROUND_CLOSEST(input, rate);
  314. if (input / ratio > clk->maxrate)
  315. ratio = 0;
  316. }
  317. if (ratio == 0)
  318. ratio = DIV_ROUND_UP(input, rate);
  319. ratio--;
  320. }
  321. if (ratio > pll->div_ratio_mask)
  322. return -EINVAL;
  323. do {
  324. v = __raw_readl(pll->base + PLLSTAT);
  325. } while (v & PLLSTAT_GOSTAT);
  326. v = __raw_readl(pll->base + clk->div_reg);
  327. v &= ~pll->div_ratio_mask;
  328. v |= ratio | PLLDIV_EN;
  329. __raw_writel(v, pll->base + clk->div_reg);
  330. v = __raw_readl(pll->base + PLLCMD);
  331. v |= PLLCMD_GOSET;
  332. __raw_writel(v, pll->base + PLLCMD);
  333. do {
  334. v = __raw_readl(pll->base + PLLSTAT);
  335. } while (v & PLLSTAT_GOSTAT);
  336. return 0;
  337. }
  338. EXPORT_SYMBOL(davinci_set_sysclk_rate);
  339. static unsigned long clk_leafclk_recalc(struct clk *clk)
  340. {
  341. if (WARN_ON(!clk->parent))
  342. return clk->rate;
  343. return clk->parent->rate;
  344. }
  345. int davinci_simple_set_rate(struct clk *clk, unsigned long rate)
  346. {
  347. clk->rate = rate;
  348. return 0;
  349. }
  350. static unsigned long clk_pllclk_recalc(struct clk *clk)
  351. {
  352. u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
  353. u8 bypass;
  354. struct pll_data *pll = clk->pll_data;
  355. unsigned long rate = clk->rate;
  356. ctrl = __raw_readl(pll->base + PLLCTL);
  357. rate = pll->input_rate = clk->parent->rate;
  358. if (ctrl & PLLCTL_PLLEN) {
  359. bypass = 0;
  360. mult = __raw_readl(pll->base + PLLM);
  361. if (cpu_is_davinci_dm365())
  362. mult = 2 * (mult & PLLM_PLLM_MASK);
  363. else
  364. mult = (mult & PLLM_PLLM_MASK) + 1;
  365. } else
  366. bypass = 1;
  367. if (pll->flags & PLL_HAS_PREDIV) {
  368. prediv = __raw_readl(pll->base + PREDIV);
  369. if (prediv & PLLDIV_EN)
  370. prediv = (prediv & pll->div_ratio_mask) + 1;
  371. else
  372. prediv = 1;
  373. }
  374. /* pre-divider is fixed, but (some?) chips won't report that */
  375. if (cpu_is_davinci_dm355() && pll->num == 1)
  376. prediv = 8;
  377. if (pll->flags & PLL_HAS_POSTDIV) {
  378. postdiv = __raw_readl(pll->base + POSTDIV);
  379. if (postdiv & PLLDIV_EN)
  380. postdiv = (postdiv & pll->div_ratio_mask) + 1;
  381. else
  382. postdiv = 1;
  383. }
  384. if (!bypass) {
  385. rate /= prediv;
  386. rate *= mult;
  387. rate /= postdiv;
  388. }
  389. pr_debug("PLL%d: input = %lu MHz [ ",
  390. pll->num, clk->parent->rate / 1000000);
  391. if (bypass)
  392. pr_debug("bypass ");
  393. if (prediv > 1)
  394. pr_debug("/ %d ", prediv);
  395. if (mult > 1)
  396. pr_debug("* %d ", mult);
  397. if (postdiv > 1)
  398. pr_debug("/ %d ", postdiv);
  399. pr_debug("] --> %lu MHz output.\n", rate / 1000000);
  400. return rate;
  401. }
/**
 * davinci_set_pllrate - set the output rate of a given PLL.
 *
 * Note: Currently tested to work with OMAP-L138 only.
 *
 * @pll: pll whose rate needs to be changed.
 * @prediv: The pre divider value. Passing 0 disables the pre-divider.
 * @mult: The multiplier value. Passing 0 leads to multiply-by-one.
 * @postdiv: The post divider value. Passing 0 disables the post-divider.
 *
 * Returns 0 on success, -EINVAL when the PLL registers are not mapped.
 */
int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
			unsigned int mult, unsigned int postdiv)
{
	u32 ctrl;
	unsigned int locktime;
	unsigned long flags;

	if (pll->base == NULL)
		return -EINVAL;

	/*
	 * PLL lock time required per OMAP-L138 datasheet is
	 * (2000 * prediv)/sqrt(pllm) OSCIN cycles. We approximate sqrt(pllm)
	 * as 4 and OSCIN cycle as 25 MHz.
	 */
	if (prediv) {
		locktime = ((2000 * prediv) / 100);
		/* register fields hold divider - 1, plus an enable bit */
		prediv = (prediv - 1) | PLLDIV_EN;
	} else {
		locktime = PLL_LOCK_TIME;
	}
	if (postdiv)
		postdiv = (postdiv - 1) | PLLDIV_EN;
	if (mult)
		mult = mult - 1;

	/* Protect against simultaneous calls to PLL setting sequence */
	spin_lock_irqsave(&clockfw_lock, flags);

	ctrl = __raw_readl(pll->base + PLLCTL);

	/* Switch the PLL to bypass mode */
	ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
	__raw_writel(ctrl, pll->base + PLLCTL);

	udelay(PLL_BYPASS_TIME);

	/* Reset and enable PLL */
	ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
	__raw_writel(ctrl, pll->base + PLLCTL);

	if (pll->flags & PLL_HAS_PREDIV)
		__raw_writel(prediv, pll->base + PREDIV);

	__raw_writel(mult, pll->base + PLLM);

	if (pll->flags & PLL_HAS_POSTDIV)
		__raw_writel(postdiv, pll->base + POSTDIV);

	udelay(PLL_RESET_TIME);

	/* Bring PLL out of reset */
	ctrl |= PLLCTL_PLLRST;
	__raw_writel(ctrl, pll->base + PLLCTL);

	udelay(locktime);

	/* Remove PLL from bypass mode */
	ctrl |= PLLCTL_PLLEN;
	__raw_writel(ctrl, pll->base + PLLCTL);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(davinci_set_pllrate);
  462. /**
  463. * davinci_set_refclk_rate() - Set the reference clock rate
  464. * @rate: The new rate.
  465. *
  466. * Sets the reference clock rate to a given value. This will most likely
  467. * result in the entire clock tree getting updated.
  468. *
  469. * This is used to support boards which use a reference clock different
  470. * than that used by default in <soc>.c file. The reference clock rate
  471. * should be updated early in the boot process; ideally soon after the
  472. * clock tree has been initialized once with the default reference clock
  473. * rate (davinci_clk_init()).
  474. *
  475. * Returns 0 on success, error otherwise.
  476. */
  477. int davinci_set_refclk_rate(unsigned long rate)
  478. {
  479. struct clk *refclk;
  480. refclk = clk_get(NULL, "ref");
  481. if (IS_ERR(refclk)) {
  482. pr_err("%s: failed to get reference clock\n", __func__);
  483. return PTR_ERR(refclk);
  484. }
  485. clk_set_rate(refclk, rate);
  486. clk_put(refclk);
  487. return 0;
  488. }
  489. int __init davinci_clk_init(struct clk_lookup *clocks)
  490. {
  491. struct clk_lookup *c;
  492. struct clk *clk;
  493. size_t num_clocks = 0;
  494. for (c = clocks; c->clk; c++) {
  495. clk = c->clk;
  496. if (!clk->recalc) {
  497. /* Check if clock is a PLL */
  498. if (clk->pll_data)
  499. clk->recalc = clk_pllclk_recalc;
  500. /* Else, if it is a PLL-derived clock */
  501. else if (clk->flags & CLK_PLL)
  502. clk->recalc = clk_sysclk_recalc;
  503. /* Otherwise, it is a leaf clock (PSC clock) */
  504. else if (clk->parent)
  505. clk->recalc = clk_leafclk_recalc;
  506. }
  507. if (clk->pll_data) {
  508. struct pll_data *pll = clk->pll_data;
  509. if (!pll->div_ratio_mask)
  510. pll->div_ratio_mask = PLLDIV_RATIO_MASK;
  511. if (pll->phys_base && !pll->base) {
  512. pll->base = ioremap(pll->phys_base, SZ_4K);
  513. WARN_ON(!pll->base);
  514. }
  515. }
  516. if (clk->recalc)
  517. clk->rate = clk->recalc(clk);
  518. if (clk->lpsc)
  519. clk->flags |= CLK_PSC;
  520. if (clk->flags & PSC_LRST)
  521. clk->reset = davinci_clk_reset;
  522. clk_register(clk);
  523. num_clocks++;
  524. /* Turn on clocks that Linux doesn't otherwise manage */
  525. if (clk->flags & ALWAYS_ENABLED)
  526. clk_enable(clk);
  527. }
  528. clkdev_add_table(clocks, num_clocks);
  529. return 0;
  530. }
#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#define CLKNAME_MAX	10		/* longest clock name */
#define NEST_DELTA	2		/* indent added per tree level */
#define NEST_MAX	4		/* deepest nesting the buffer allows */
  537. static void
  538. dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
  539. {
  540. char *state;
  541. char buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
  542. struct clk *clk;
  543. unsigned i;
  544. if (parent->flags & CLK_PLL)
  545. state = "pll";
  546. else if (parent->flags & CLK_PSC)
  547. state = "psc";
  548. else
  549. state = "";
  550. /* <nest spaces> name <pad to end> */
  551. memset(buf, ' ', sizeof(buf) - 1);
  552. buf[sizeof(buf) - 1] = 0;
  553. i = strlen(parent->name);
  554. memcpy(buf + nest, parent->name,
  555. min(i, (unsigned)(sizeof(buf) - 1 - nest)));
  556. seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
  557. buf, parent->usecount, state, clk_get_rate(parent));
  558. /* REVISIT show device associations too */
  559. /* cost is now small, but not linear... */
  560. list_for_each_entry(clk, &parent->children, childnode) {
  561. dump_clock(s, nest + NEST_DELTA, clk);
  562. }
  563. }
  564. static int davinci_ck_show(struct seq_file *m, void *v)
  565. {
  566. struct clk *clk;
  567. /*
  568. * Show clock tree; We trust nonzero usecounts equate to PSC enables...
  569. */
  570. mutex_lock(&clocks_mutex);
  571. list_for_each_entry(clk, &clocks, node)
  572. if (!clk->parent)
  573. dump_clock(m, 0, clk);
  574. mutex_unlock(&clocks_mutex);
  575. return 0;
  576. }
  577. static int davinci_ck_open(struct inode *inode, struct file *file)
  578. {
  579. return single_open(file, davinci_ck_show, NULL);
  580. }
/* File operations for the read-only "davinci_clocks" debugfs entry. */
static const struct file_operations davinci_ck_operations = {
	.open		= davinci_ck_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
  587. static int __init davinci_clk_debugfs_init(void)
  588. {
  589. debugfs_create_file("davinci_clocks", S_IFREG | S_IRUGO, NULL, NULL,
  590. &davinci_ck_operations);
  591. return 0;
  592. }
  593. device_initcall(davinci_clk_debugfs_init);
  594. #endif /* CONFIG_DEBUG_FS */