/*
 * SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2018, The Linux Foundation
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iopoll.h>

#include "dsi_pll.h"
#include "dsi.xml.h"
/*
 * DSI PLL 10nm - clock diagram (eg: DSI0):
 *
 *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
 *                              |                |
 *                              |                |
 *                 +---------+  |  +----------+  |  +----+
 *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte
 *                 +---------+  |  +----------+  |  +----+
 *                              |                |
 *                              |                |         dsi0_pll_by_2_bit_clk
 *                              |                |          |
 *                              |                |  +----+  |  |\  dsi0_pclk_mux
 *                              |                |--| /2 |--o--| \   |
 *                              |                |  +----+     |  \  |  +---------+
 *                              |                --------------|  |--o--| div_7_4 |-- dsi0pll
 *                              |------------------------------|  /     +---------+
 *                              |          +-----+             | /
 *                              -----------| /4? |--o----------|/
 *                                         +-----+  |           |
 *                                                  |           |dsiclk_sel
 *                                                  |
 *                                                  dsi0_pll_post_out_div_clk
 */
  34. #define DSI_BYTE_PLL_CLK 0
  35. #define DSI_PIXEL_PLL_CLK 1
  36. #define NUM_PROVIDED_CLKS 2
  37. struct dsi_pll_regs {
  38. u32 pll_prop_gain_rate;
  39. u32 pll_lockdet_rate;
  40. u32 decimal_div_start;
  41. u32 frac_div_start_low;
  42. u32 frac_div_start_mid;
  43. u32 frac_div_start_high;
  44. u32 pll_clock_inverters;
  45. u32 ssc_stepsize_low;
  46. u32 ssc_stepsize_high;
  47. u32 ssc_div_per_low;
  48. u32 ssc_div_per_high;
  49. u32 ssc_adjper_low;
  50. u32 ssc_adjper_high;
  51. u32 ssc_control;
  52. };
  53. struct dsi_pll_config {
  54. u32 ref_freq;
  55. bool div_override;
  56. u32 output_div;
  57. bool ignore_frac;
  58. bool disable_prescaler;
  59. bool enable_ssc;
  60. bool ssc_center;
  61. u32 dec_bits;
  62. u32 frac_bits;
  63. u32 lock_timer;
  64. u32 ssc_freq;
  65. u32 ssc_offset;
  66. u32 ssc_adj_per;
  67. u32 thresh_cycles;
  68. u32 refclk_cycles;
  69. };
  70. struct pll_10nm_cached_state {
  71. unsigned long vco_rate;
  72. u8 bit_clk_div;
  73. u8 pix_clk_div;
  74. u8 pll_out_div;
  75. u8 pll_mux;
  76. };
  77. struct dsi_pll_10nm {
  78. struct msm_dsi_pll base;
  79. int id;
  80. struct platform_device *pdev;
  81. void __iomem *phy_cmn_mmio;
  82. void __iomem *mmio;
  83. u64 vco_ref_clk_rate;
  84. u64 vco_current_rate;
  85. /* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
  86. spinlock_t postdiv_lock;
  87. int vco_delay;
  88. struct dsi_pll_config pll_configuration;
  89. struct dsi_pll_regs reg_setup;
  90. /* private clocks: */
  91. struct clk_hw *hws[NUM_DSI_CLOCKS_MAX];
  92. u32 num_hws;
  93. /* clock-provider: */
  94. struct clk_hw_onecell_data *hw_data;
  95. struct pll_10nm_cached_state cached_state;
  96. enum msm_dsi_phy_usecase uc;
  97. struct dsi_pll_10nm *slave;
  98. };
  99. #define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, base)
  100. /*
  101. * Global list of private DSI PLL struct pointers. We need this for Dual DSI
  102. * mode, where the master PLL's clk_ops needs access the slave's private data
  103. */
  104. static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
  105. static void dsi_pll_setup_config(struct dsi_pll_10nm *pll)
  106. {
  107. struct dsi_pll_config *config = &pll->pll_configuration;
  108. config->ref_freq = pll->vco_ref_clk_rate;
  109. config->output_div = 1;
  110. config->dec_bits = 8;
  111. config->frac_bits = 18;
  112. config->lock_timer = 64;
  113. config->ssc_freq = 31500;
  114. config->ssc_offset = 5000;
  115. config->ssc_adj_per = 2;
  116. config->thresh_cycles = 32;
  117. config->refclk_cycles = 256;
  118. config->div_override = false;
  119. config->ignore_frac = false;
  120. config->disable_prescaler = false;
  121. config->enable_ssc = false;
  122. config->ssc_center = 0;
  123. }
  124. static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll)
  125. {
  126. struct dsi_pll_config *config = &pll->pll_configuration;
  127. struct dsi_pll_regs *regs = &pll->reg_setup;
  128. u64 fref = pll->vco_ref_clk_rate;
  129. u64 pll_freq;
  130. u64 divider;
  131. u64 dec, dec_multiple;
  132. u32 frac;
  133. u64 multiplier;
  134. pll_freq = pll->vco_current_rate;
  135. if (config->disable_prescaler)
  136. divider = fref;
  137. else
  138. divider = fref * 2;
  139. multiplier = 1 << config->frac_bits;
  140. dec_multiple = div_u64(pll_freq * multiplier, divider);
  141. div_u64_rem(dec_multiple, multiplier, &frac);
  142. dec = div_u64(dec_multiple, multiplier);
  143. if (pll_freq <= 1900000000UL)
  144. regs->pll_prop_gain_rate = 8;
  145. else if (pll_freq <= 3000000000UL)
  146. regs->pll_prop_gain_rate = 10;
  147. else
  148. regs->pll_prop_gain_rate = 12;
  149. if (pll_freq < 1100000000UL)
  150. regs->pll_clock_inverters = 8;
  151. else
  152. regs->pll_clock_inverters = 0;
  153. regs->pll_lockdet_rate = config->lock_timer;
  154. regs->decimal_div_start = dec;
  155. regs->frac_div_start_low = (frac & 0xff);
  156. regs->frac_div_start_mid = (frac & 0xff00) >> 8;
  157. regs->frac_div_start_high = (frac & 0x30000) >> 16;
  158. }
  159. #define SSC_CENTER BIT(0)
  160. #define SSC_EN BIT(1)
  161. static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll)
  162. {
  163. struct dsi_pll_config *config = &pll->pll_configuration;
  164. struct dsi_pll_regs *regs = &pll->reg_setup;
  165. u32 ssc_per;
  166. u32 ssc_mod;
  167. u64 ssc_step_size;
  168. u64 frac;
  169. if (!config->enable_ssc) {
  170. DBG("SSC not enabled\n");
  171. return;
  172. }
  173. ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
  174. ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
  175. ssc_per -= ssc_mod;
  176. frac = regs->frac_div_start_low |
  177. (regs->frac_div_start_mid << 8) |
  178. (regs->frac_div_start_high << 16);
  179. ssc_step_size = regs->decimal_div_start;
  180. ssc_step_size *= (1 << config->frac_bits);
  181. ssc_step_size += frac;
  182. ssc_step_size *= config->ssc_offset;
  183. ssc_step_size *= (config->ssc_adj_per + 1);
  184. ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
  185. ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
  186. regs->ssc_div_per_low = ssc_per & 0xFF;
  187. regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
  188. regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
  189. regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
  190. regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
  191. regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
  192. regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
  193. pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
  194. regs->decimal_div_start, frac, config->frac_bits);
  195. pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
  196. ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
  197. }
  198. static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll)
  199. {
  200. void __iomem *base = pll->mmio;
  201. struct dsi_pll_regs *regs = &pll->reg_setup;
  202. if (pll->pll_configuration.enable_ssc) {
  203. pr_debug("SSC is enabled\n");
  204. pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
  205. regs->ssc_stepsize_low);
  206. pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
  207. regs->ssc_stepsize_high);
  208. pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
  209. regs->ssc_div_per_low);
  210. pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
  211. regs->ssc_div_per_high);
  212. pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
  213. regs->ssc_adjper_low);
  214. pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
  215. regs->ssc_adjper_high);
  216. pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
  217. SSC_EN | regs->ssc_control);
  218. }
  219. }
  220. static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
  221. {
  222. void __iomem *base = pll->mmio;
  223. pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
  224. pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
  225. pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
  226. pll_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
  227. pll_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
  228. pll_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
  229. pll_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
  230. 0xba);
  231. pll_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
  232. pll_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
  233. pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
  234. pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
  235. pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
  236. pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
  237. pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
  238. pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
  239. 0x4c);
  240. pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
  241. pll_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
  242. pll_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
  243. }
  244. static void dsi_pll_commit(struct dsi_pll_10nm *pll)
  245. {
  246. void __iomem *base = pll->mmio;
  247. struct dsi_pll_regs *reg = &pll->reg_setup;
  248. pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
  249. pll_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
  250. reg->decimal_div_start);
  251. pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
  252. reg->frac_div_start_low);
  253. pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
  254. reg->frac_div_start_mid);
  255. pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
  256. reg->frac_div_start_high);
  257. pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
  258. pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
  259. pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
  260. pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
  261. reg->pll_clock_inverters);
  262. }
  263. static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
  264. unsigned long parent_rate)
  265. {
  266. struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
  267. struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
  268. DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->id, rate,
  269. parent_rate);
  270. pll_10nm->vco_current_rate = rate;
  271. pll_10nm->vco_ref_clk_rate = parent_rate;
  272. dsi_pll_setup_config(pll_10nm);
  273. dsi_pll_calc_dec_frac(pll_10nm);
  274. dsi_pll_calc_ssc(pll_10nm);
  275. dsi_pll_commit(pll_10nm);
  276. dsi_pll_config_hzindep_reg(pll_10nm);
  277. dsi_pll_ssc_commit(pll_10nm);
  278. /* flush, ensure all register writes are done*/
  279. wmb();
  280. return 0;
  281. }
  282. static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
  283. {
  284. int rc;
  285. u32 status = 0;
  286. u32 const delay_us = 100;
  287. u32 const timeout_us = 5000;
  288. rc = readl_poll_timeout_atomic(pll->mmio +
  289. REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
  290. status,
  291. ((status & BIT(0)) > 0),
  292. delay_us,
  293. timeout_us);
  294. if (rc)
  295. pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
  296. pll->id, status);
  297. return rc;
  298. }
  299. static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
  300. {
  301. u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);
  302. pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
  303. pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,
  304. data & ~BIT(5));
  305. ndelay(250);
  306. }
  307. static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
  308. {
  309. u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);
  310. pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,
  311. data | BIT(5));
  312. pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
  313. ndelay(250);
  314. }
  315. static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
  316. {
  317. u32 data;
  318. data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
  319. pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
  320. data & ~BIT(5));
  321. }
  322. static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
  323. {
  324. u32 data;
  325. data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
  326. pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
  327. data | BIT(5));
  328. }
  329. static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
  330. {
  331. struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
  332. struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
  333. int rc;
  334. dsi_pll_enable_pll_bias(pll_10nm);
  335. if (pll_10nm->slave)
  336. dsi_pll_enable_pll_bias(pll_10nm->slave);
  337. /* Start PLL */
  338. pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
  339. 0x01);
  340. /*
  341. * ensure all PLL configurations are written prior to checking
  342. * for PLL lock.
  343. */
  344. wmb();
  345. /* Check for PLL lock */
  346. rc = dsi_pll_10nm_lock_status(pll_10nm);
  347. if (rc) {
  348. pr_err("PLL(%d) lock failed\n", pll_10nm->id);
  349. goto error;
  350. }
  351. pll->pll_on = true;
  352. dsi_pll_enable_global_clk(pll_10nm);
  353. if (pll_10nm->slave)
  354. dsi_pll_enable_global_clk(pll_10nm->slave);
  355. pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
  356. 0x01);
  357. if (pll_10nm->slave)
  358. pll_write(pll_10nm->slave->phy_cmn_mmio +
  359. REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
  360. error:
  361. return rc;
  362. }
  363. static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
  364. {
  365. pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
  366. dsi_pll_disable_pll_bias(pll);
  367. }
  368. static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
  369. {
  370. struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
  371. struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
  372. /*
  373. * To avoid any stray glitches while abruptly powering down the PLL
  374. * make sure to gate the clock using the clock enable bit before
  375. * powering down the PLL
  376. */
  377. dsi_pll_disable_global_clk(pll_10nm);
  378. pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
  379. dsi_pll_disable_sub(pll_10nm);
  380. if (pll_10nm->slave) {
  381. dsi_pll_disable_global_clk(pll_10nm->slave);
  382. dsi_pll_disable_sub(pll_10nm->slave);
  383. }
  384. /* flush, ensure all register writes are done */
  385. wmb();
  386. pll->pll_on = false;
  387. }
  388. static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
  389. unsigned long parent_rate)
  390. {
  391. struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
  392. struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
  393. void __iomem *base = pll_10nm->mmio;
  394. u64 ref_clk = pll_10nm->vco_ref_clk_rate;
  395. u64 vco_rate = 0x0;
  396. u64 multiplier;
  397. u32 frac;
  398. u32 dec;
  399. u64 pll_freq, tmp64;
  400. dec = pll_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
  401. dec &= 0xff;
  402. frac = pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
  403. frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
  404. 0xff) << 8);
  405. frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
  406. 0x3) << 16);
  407. /*
  408. * TODO:
  409. * 1. Assumes prescaler is disabled
  410. * 2. Multiplier is 2^18. it should be 2^(num_of_frac_bits)
  411. */
  412. multiplier = 1 << 18;
  413. pll_freq = dec * (ref_clk * 2);
  414. tmp64 = (ref_clk * 2 * frac);
  415. pll_freq += div_u64(tmp64, multiplier);
  416. vco_rate = pll_freq;
  417. DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
  418. pll_10nm->id, (unsigned long)vco_rate, dec, frac);
  419. return (unsigned long)vco_rate;
  420. }
  421. static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
  422. .round_rate = msm_dsi_pll_helper_clk_round_rate,
  423. .set_rate = dsi_pll_10nm_vco_set_rate,
  424. .recalc_rate = dsi_pll_10nm_vco_recalc_rate,
  425. .prepare = dsi_pll_10nm_vco_prepare,
  426. .unprepare = dsi_pll_10nm_vco_unprepare,
  427. };
  428. /*
  429. * PLL Callbacks
  430. */
  431. static void dsi_pll_10nm_save_state(struct msm_dsi_pll *pll)
  432. {
  433. struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
  434. struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
  435. void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
  436. u32 cmn_clk_cfg0, cmn_clk_cfg1;
  437. cached->pll_out_div = pll_read(pll_10nm->mmio +
  438. REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
  439. cached->pll_out_div &= 0x3;
  440. cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
  441. cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
  442. cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
  443. cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
  444. cached->pll_mux = cmn_clk_cfg1 & 0x3;
  445. DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
  446. pll_10nm->id, cached->pll_out_div, cached->bit_clk_div,
  447. cached->pix_clk_div, cached->pll_mux);
  448. }
  449. static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
  450. {
  451. struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
  452. struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
  453. void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
  454. u32 val;
  455. val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
  456. val &= ~0x3;
  457. val |= cached->pll_out_div;
  458. pll_write(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);
  459. pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
  460. cached->bit_clk_div | (cached->pix_clk_div << 4));
  461. val = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
  462. val &= ~0x3;
  463. val |= cached->pll_mux;
  464. pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
  465. DBG("DSI PLL%d", pll_10nm->id);
  466. return 0;
  467. }
  468. static int dsi_pll_10nm_set_usecase(struct msm_dsi_pll *pll,
  469. enum msm_dsi_phy_usecase uc)
  470. {
  471. struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
  472. void __iomem *base = pll_10nm->phy_cmn_mmio;
  473. u32 data = 0x0; /* internal PLL */
  474. DBG("DSI PLL%d", pll_10nm->id);
  475. switch (uc) {
  476. case MSM_DSI_PHY_STANDALONE:
  477. break;
  478. case MSM_DSI_PHY_MASTER:
  479. pll_10nm->slave = pll_10nm_list[(pll_10nm->id + 1) % DSI_MAX];
  480. break;
  481. case MSM_DSI_PHY_SLAVE:
  482. data = 0x1; /* external PLL */
  483. break;
  484. default:
  485. return -EINVAL;
  486. }
  487. /* set PLL src */
  488. pll_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));
  489. pll_10nm->uc = uc;
  490. return 0;
  491. }
  492. static int dsi_pll_10nm_get_provider(struct msm_dsi_pll *pll,
  493. struct clk **byte_clk_provider,
  494. struct clk **pixel_clk_provider)
  495. {
  496. struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
  497. struct clk_hw_onecell_data *hw_data = pll_10nm->hw_data;
  498. DBG("DSI PLL%d", pll_10nm->id);
  499. if (byte_clk_provider)
  500. *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
  501. if (pixel_clk_provider)
  502. *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
  503. return 0;
  504. }
  505. static void dsi_pll_10nm_destroy(struct msm_dsi_pll *pll)
  506. {
  507. struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
  508. DBG("DSI PLL%d", pll_10nm->id);
  509. }
  510. /*
  511. * The post dividers and mux clocks are created using the standard divider and
  512. * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
  513. * state to follow the master PLL's divider/mux state. Therefore, we don't
  514. * require special clock ops that also configure the slave PLL registers
  515. */
  516. static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
  517. {
  518. char clk_name[32], parent[32], vco_name[32];
  519. char parent2[32], parent3[32], parent4[32];
  520. struct clk_init_data vco_init = {
  521. .parent_names = (const char *[]){ "xo" },
  522. .num_parents = 1,
  523. .name = vco_name,
  524. .flags = CLK_IGNORE_UNUSED,
  525. .ops = &clk_ops_dsi_pll_10nm_vco,
  526. };
  527. struct device *dev = &pll_10nm->pdev->dev;
  528. struct clk_hw **hws = pll_10nm->hws;
  529. struct clk_hw_onecell_data *hw_data;
  530. struct clk_hw *hw;
  531. int num = 0;
  532. int ret;
  533. DBG("DSI%d", pll_10nm->id);
  534. hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
  535. NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
  536. GFP_KERNEL);
  537. if (!hw_data)
  538. return -ENOMEM;
  539. snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->id);
  540. pll_10nm->base.clk_hw.init = &vco_init;
  541. ret = clk_hw_register(dev, &pll_10nm->base.clk_hw);
  542. if (ret)
  543. return ret;
  544. hws[num++] = &pll_10nm->base.clk_hw;
  545. snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
  546. snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->id);
  547. hw = clk_hw_register_divider(dev, clk_name,
  548. parent, CLK_SET_RATE_PARENT,
  549. pll_10nm->mmio +
  550. REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
  551. 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
  552. if (IS_ERR(hw))
  553. return PTR_ERR(hw);
  554. hws[num++] = hw;
  555. snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
  556. snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
  557. /* BIT CLK: DIV_CTRL_3_0 */
  558. hw = clk_hw_register_divider(dev, clk_name, parent,
  559. CLK_SET_RATE_PARENT,
  560. pll_10nm->phy_cmn_mmio +
  561. REG_DSI_10nm_PHY_CMN_CLK_CFG0,
  562. 0, 4, CLK_DIVIDER_ONE_BASED,
  563. &pll_10nm->postdiv_lock);
  564. if (IS_ERR(hw))
  565. return PTR_ERR(hw);
  566. hws[num++] = hw;
  567. snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id);
  568. snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
  569. /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
  570. hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
  571. CLK_SET_RATE_PARENT, 1, 8);
  572. if (IS_ERR(hw))
  573. return PTR_ERR(hw);
  574. hws[num++] = hw;
  575. hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
  576. snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
  577. snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
  578. hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
  579. 0, 1, 2);
  580. if (IS_ERR(hw))
  581. return PTR_ERR(hw);
  582. hws[num++] = hw;
  583. snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
  584. snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
  585. hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
  586. 0, 1, 4);
  587. if (IS_ERR(hw))
  588. return PTR_ERR(hw);
  589. hws[num++] = hw;
  590. snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->id);
  591. snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
  592. snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
  593. snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
  594. snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
  595. hw = clk_hw_register_mux(dev, clk_name,
  596. (const char *[]){
  597. parent, parent2, parent3, parent4
  598. }, 4, 0, pll_10nm->phy_cmn_mmio +
  599. REG_DSI_10nm_PHY_CMN_CLK_CFG1,
  600. 0, 2, 0, NULL);
  601. if (IS_ERR(hw))
  602. return PTR_ERR(hw);
  603. hws[num++] = hw;
  604. snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id);
  605. snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
  606. /* PIX CLK DIV : DIV_CTRL_7_4*/
  607. hw = clk_hw_register_divider(dev, clk_name, parent,
  608. 0, pll_10nm->phy_cmn_mmio +
  609. REG_DSI_10nm_PHY_CMN_CLK_CFG0,
  610. 4, 4, CLK_DIVIDER_ONE_BASED,
  611. &pll_10nm->postdiv_lock);
  612. if (IS_ERR(hw))
  613. return PTR_ERR(hw);
  614. hws[num++] = hw;
  615. hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
  616. pll_10nm->num_hws = num;
  617. hw_data->num = NUM_PROVIDED_CLKS;
  618. pll_10nm->hw_data = hw_data;
  619. ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
  620. pll_10nm->hw_data);
  621. if (ret) {
  622. dev_err(dev, "failed to register clk provider: %d\n", ret);
  623. return ret;
  624. }
  625. return 0;
  626. }
  627. struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
  628. {
  629. struct dsi_pll_10nm *pll_10nm;
  630. struct msm_dsi_pll *pll;
  631. int ret;
  632. if (!pdev)
  633. return ERR_PTR(-ENODEV);
  634. pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
  635. if (!pll_10nm)
  636. return ERR_PTR(-ENOMEM);
  637. DBG("DSI PLL%d", id);
  638. pll_10nm->pdev = pdev;
  639. pll_10nm->id = id;
  640. pll_10nm_list[id] = pll_10nm;
  641. pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
  642. if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {
  643. dev_err(&pdev->dev, "failed to map CMN PHY base\n");
  644. return ERR_PTR(-ENOMEM);
  645. }
  646. pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
  647. if (IS_ERR_OR_NULL(pll_10nm->mmio)) {
  648. dev_err(&pdev->dev, "failed to map PLL base\n");
  649. return ERR_PTR(-ENOMEM);
  650. }
  651. pll = &pll_10nm->base;
  652. pll->min_rate = 1000000000UL;
  653. pll->max_rate = 3500000000UL;
  654. pll->get_provider = dsi_pll_10nm_get_provider;
  655. pll->destroy = dsi_pll_10nm_destroy;
  656. pll->save_state = dsi_pll_10nm_save_state;
  657. pll->restore_state = dsi_pll_10nm_restore_state;
  658. pll->set_usecase = dsi_pll_10nm_set_usecase;
  659. pll_10nm->vco_delay = 1;
  660. ret = pll_10nm_register(pll_10nm);
  661. if (ret) {
  662. dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
  663. return ERR_PTR(ret);
  664. }
  665. /* TODO: Remove this when we have proper display handover support */
  666. msm_dsi_pll_save_state(pll);
  667. return pll;
  668. }