dsi_phy.c

/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/platform_device.h>

#include "dsi_phy.h"
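
/*
 * Signed DIV_ROUND_UP: round the quotient away from zero, so e.g.
 * S_DIV_ROUND_UP(7, 2) == 4 and S_DIV_ROUND_UP(-7, 2) == -4.
 */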
#define S_DIV_ROUND_UP(n, d)	\
	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
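
/*
 * Pick the value 'percent' of the way from tmin towards tmax (rounding the
 * step up), clamped to min_result; when 'even' is set, an odd result is
 * rounded down to the next even value.
 * E.g. linear_inter(100, 20, 10, 0, false) == 28.
 */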
static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
				s32 min_result, bool even)
{
	s32 v;

	v = (tmax - tmin) * percent;
	v = S_DIV_ROUND_UP(v, 100) + tmin;
	if (even && (v & 0x1))
		return max_t(s32, min_result, v - 1);
	else
		return max_t(s32, min_result, v);
}
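
/*
 * Derive clk_zero for the legacy timing calculation: take the 300 ns budget
 * for clk_prepare + clk_zero, subtract the time already spent in clk_prepare,
 * then pad the result so hs_rqst + clk_prepare + clk_zero lands on a multiple
 * of 8.
 */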
static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
					s32 ui, s32 coeff, s32 pcnt)
{
	s32 tmax, tmin, clk_z;
	s32 temp;

	/* reset */
	temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	if (tmin > 255) {
		tmax = 511;
		clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
	} else {
		tmax = 255;
		clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
	}

	/* adjust */
	temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
	timing->clk_zero = clk_z + 8 - temp;
}
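
/*
 * Compute the D-PHY lane timings from the requested bit clock and escape
 * clock rates. 'ui' (one unit interval, i.e. one bit period) and 'lpx' are
 * scaled by 'coeff' to keep the integer math precise; each timing field is
 * then interpolated between a minimum and maximum bound derived from them.
 */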
int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
			struct msm_dsi_phy_clk_request *clk_req)
{
	const unsigned long bit_rate = clk_req->bitclk_rate;
	const unsigned long esc_rate = clk_req->escclk_rate;
	s32 ui, lpx;
	s32 tmax, tmin;
	s32 pcnt0 = 10;
	s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
	s32 pcnt2 = 10;
	s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);

	tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
	tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);

	temp = lpx / ui;
	if (temp & 0x1)
		timing->hs_rqst = temp;
	else
		timing->hs_rqst = max_t(s32, 0, temp - 2);

	/* Calculate clk_zero after clk_prepare and hs_rqst */
	dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	temp = 85 * coeff + 6 * ui;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 40 * coeff + 4 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);

	tmax = 255;
	temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
	temp = 145 * coeff + 10 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 60 * coeff + 4 * ui;
	tmin = DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	tmax = 255;
	tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);

	tmax = 63;
	temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
	temp = 60 * coeff + 52 * ui - 24 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	timing->shared_timings.clk_post = linear_inter(tmax, tmin, pcnt2, 0,
						false);

	tmax = 63;
	temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
	temp += 8 * ui + lpx;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	if (tmin > tmax) {
		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre = temp >> 1;
		timing->shared_timings.clk_pre_inc_by_2 = true;
	} else {
		timing->shared_timings.clk_pre =
			linear_inter(tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre_inc_by_2 = false;
	}

	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
		timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
		timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
		timing->clk_trail, timing->clk_prepare, timing->hs_exit,
		timing->hs_zero, timing->hs_prepare, timing->hs_trail,
		timing->hs_rqst);

	return 0;
}
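
/*
 * v2 of the timing calculation, used by the newer PHY generations: the
 * timing fields are expressed in byte-clock cycles (ui_x8, i.e. 8 UI), and
 * the hs_prep_dly/halfbyte settings feed back into the bounds through 'val'
 * and 'val_ckln'.
 */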
int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
			struct msm_dsi_phy_clk_request *clk_req)
{
	const unsigned long bit_rate = clk_req->bitclk_rate;
	const unsigned long esc_rate = clk_req->escclk_rate;
	s32 ui, ui_x8, lpx;
	s32 tmax, tmin;
	s32 pcnt0 = 50;
	s32 pcnt1 = 50;
	s32 pcnt2 = 10;
	s32 pcnt3 = 30;
	s32 pcnt4 = 10;
	s32 pcnt5 = 2;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 hb_en, hb_en_ckln, pd_ckln, pd;
	s32 val, val_ckln;
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	timing->hs_halfbyte_en = 0;
	hb_en = 0;
	timing->hs_halfbyte_en_ckln = 0;
	hb_en_ckln = 0;
	timing->hs_prep_dly_ckln = (bit_rate > 100000000) ? 0 : 3;
	pd_ckln = timing->hs_prep_dly_ckln;
	timing->hs_prep_dly = (bit_rate > 120000000) ? 0 : 1;
	pd = timing->hs_prep_dly;

	val = (hb_en << 2) + (pd << 1);
	val_ckln = (hb_en_ckln << 2) + (pd_ckln << 1);

	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	ui_x8 = ui << 3;
	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);

	temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (95 * coeff - val_ckln * ui) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);

	temp = 300 * coeff - ((timing->clk_prepare << 3) + val_ckln) * ui;
	tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
	tmax = (tmin > 255) ? 511 : 255;
	timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp + 3 * ui) / ui_x8;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui - val * ui, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (85 * coeff + 6 * ui - val * ui) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);

	temp = 145 * coeff + 10 * ui - ((timing->hs_prepare << 3) + val) * ui;
	tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
	tmax = 255;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 4 * ui + 3 * ui, ui_x8);
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp + 3 * ui) / ui_x8;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
	timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);

	tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
	tmax = 255;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);

	temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
	timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);

	temp = 60 * coeff + 52 * ui - 43 * ui;
	tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	timing->shared_timings.clk_post =
		linear_inter(tmax, tmin, pcnt2, 0, false);

	temp = 8 * ui + ((timing->clk_prepare << 3) + val_ckln) * ui;
	temp += (((timing->clk_zero + 3) << 3) + 11 - (pd_ckln << 1)) * ui;
	temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
				(((timing->hs_rqst_ckln << 3) + 8) * ui);
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	if (tmin > tmax) {
		temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre = temp >> 1;
		timing->shared_timings.clk_pre_inc_by_2 = 1;
	} else {
		timing->shared_timings.clk_pre =
			linear_inter(tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre_inc_by_2 = 0;
	}

	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
		timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
		timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
		timing->clk_trail, timing->clk_prepare, timing->hs_exit,
		timing->hs_zero, timing->hs_prepare, timing->hs_trail,
		timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
		timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
		timing->hs_prep_dly_ckln);

	return 0;
}
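
/*
 * Select which DSI PLL feeds this PHY: the per-chipset src_pll_truthtable
 * decides whether 'bit_mask' in register 'reg' gets set or cleared for the
 * given (PHY id, PLL id) pair.
 */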
void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
				u32 bit_mask)
{
	int phy_id = phy->id;
	u32 val;

	if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX))
		return;

	val = dsi_phy_read(phy->base + reg);

	if (phy->cfg->src_pll_truthtable[phy_id][pll_id])
		dsi_phy_write(phy->base + reg, val | bit_mask);
	else
		dsi_phy_write(phy->base + reg, val & (~bit_mask));
}
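
/*
 * The PHY supplies come from the per-chipset reg_cfg table; the helpers
 * below request them, program the enable/disable loads and switch the whole
 * bulk on or off.
 */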
static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	struct device *dev = &phy->pdev->dev;
	int num = phy->cfg->reg_cfg.num;
	int i, ret;

	for (i = 0; i < num; i++)
		s[i].supply = regs[i].name;

	ret = devm_regulator_bulk_get(dev, num, s);
	if (ret < 0) {
		dev_err(dev, "%s: failed to init regulator, ret=%d\n",
			__func__, ret);
		return ret;
	}

	return 0;
}

static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	int num = phy->cfg->reg_cfg.num;
	int i;

	DBG("");
	for (i = num - 1; i >= 0; i--)
		if (regs[i].disable_load >= 0)
			regulator_set_load(s[i].consumer, regs[i].disable_load);

	regulator_bulk_disable(num, s);
}

static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	struct device *dev = &phy->pdev->dev;
	int num = phy->cfg->reg_cfg.num;
	int ret, i;

	DBG("");
	for (i = 0; i < num; i++) {
		if (regs[i].enable_load >= 0) {
			ret = regulator_set_load(s[i].consumer,
						regs[i].enable_load);
			if (ret < 0) {
				dev_err(dev,
					"regulator %d set op mode failed, %d\n",
					i, ret);
				goto fail;
			}
		}
	}

	ret = regulator_bulk_enable(num, s);
	if (ret < 0) {
		dev_err(dev, "regulator enable failed, %d\n", ret);
		goto fail;
	}

	return 0;

fail:
	for (i--; i >= 0; i--)
		regulator_set_load(s[i].consumer, regs[i].disable_load);
	return ret;
}
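
/*
 * Register access requires the PHY to be powered (runtime PM) and its AHB
 * interface clock running; these helpers take and release both.
 */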
static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
{
	struct device *dev = &phy->pdev->dev;
	int ret;

	pm_runtime_get_sync(dev);
	ret = clk_prepare_enable(phy->ahb_clk);
	if (ret) {
		dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
		pm_runtime_put_sync(dev);
	}

	return ret;
}

static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
{
	clk_disable_unprepare(phy->ahb_clk);
	pm_runtime_put_autosuspend(&phy->pdev->dev);
}

static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
	{ .compatible = "qcom,dsi-phy-28nm-hpm",
	  .data = &dsi_phy_28nm_hpm_cfgs },
	{ .compatible = "qcom,dsi-phy-28nm-lp",
	  .data = &dsi_phy_28nm_lp_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
	{ .compatible = "qcom,dsi-phy-20nm",
	  .data = &dsi_phy_20nm_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
	{ .compatible = "qcom,dsi-phy-28nm-8960",
	  .data = &dsi_phy_28nm_8960_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
	{ .compatible = "qcom,dsi-phy-14nm",
	  .data = &dsi_phy_14nm_cfgs },
#endif
	{}
};

/*
 * Currently, we only support one SoC for each PHY type. When we have multiple
 * SoCs for the same PHY, we can try to make the index searching a bit more
 * clever.
 */
static int dsi_phy_get_id(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;
	const struct msm_dsi_phy_cfg *cfg = phy->cfg;
	struct resource *res;
	int i;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
	if (!res)
		return -EINVAL;

	for (i = 0; i < cfg->num_dsi_phy; i++) {
		if (cfg->io_start[i] == res->start)
			return i;
	}

	return -EINVAL;
}
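
/*
 * Shared init helper: map the "dsi_phy_regulator" register region into
 * phy->reg_base for the PHY generations that need it.
 */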
int msm_dsi_phy_init_common(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;
	int ret = 0;

	phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
				"DSI_PHY_REG");
	if (IS_ERR(phy->reg_base)) {
		dev_err(&pdev->dev, "%s: failed to map phy regulator base\n",
			__func__);
		ret = -ENOMEM;
		goto fail;
	}

fail:
	return ret;
}
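
/*
 * Probe: match the PHY variant from DT, map its registers, set up the
 * supplies and AHB clock, run the variant's init hook, and finally try to
 * register the PHY's PLL as a clock provider.
 */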
static int dsi_phy_driver_probe(struct platform_device *pdev)
{
	struct msm_dsi_phy *phy;
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	int ret;

	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	match = of_match_node(dsi_phy_dt_match, dev->of_node);
	if (!match)
		return -ENODEV;

	phy->cfg = match->data;
	phy->pdev = pdev;

	phy->id = dsi_phy_get_id(phy);
	if (phy->id < 0) {
		ret = phy->id;
		dev_err(dev, "%s: couldn't identify PHY index, %d\n",
			__func__, ret);
		goto fail;
	}

	phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
				"qcom,dsi-phy-regulator-ldo-mode");

	phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
	if (IS_ERR(phy->base)) {
		dev_err(dev, "%s: failed to map phy base\n", __func__);
		ret = -ENOMEM;
		goto fail;
	}

	ret = dsi_phy_regulator_init(phy);
	if (ret) {
		dev_err(dev, "%s: failed to init regulator\n", __func__);
		goto fail;
	}

	phy->ahb_clk = devm_clk_get(dev, "iface_clk");
	if (IS_ERR(phy->ahb_clk)) {
		dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
		ret = PTR_ERR(phy->ahb_clk);
		goto fail;
	}

	if (phy->cfg->ops.init) {
		ret = phy->cfg->ops.init(phy);
		if (ret)
			goto fail;
	}

	/* PLL init will call into clk_register which requires
	 * register access, so we need to enable power and ahb clock.
	 */
	ret = dsi_phy_enable_resource(phy);
	if (ret)
		goto fail;

	phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
	if (!phy->pll)
		dev_info(dev,
			"%s: pll init failed, need separate pll clk driver\n",
			__func__);

	dsi_phy_disable_resource(phy);

	platform_set_drvdata(pdev, phy);

	return 0;

fail:
	return ret;
}

static int dsi_phy_driver_remove(struct platform_device *pdev)
{
	struct msm_dsi_phy *phy = platform_get_drvdata(pdev);

	if (phy && phy->pll) {
		msm_dsi_pll_destroy(phy->pll);
		phy->pll = NULL;
	}

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver dsi_phy_platform_driver = {
	.probe  = dsi_phy_driver_probe,
	.remove = dsi_phy_driver_remove,
	.driver = {
		.name = "msm_dsi_phy",
		.of_match_table = dsi_phy_dt_match,
	},
};

void __init msm_dsi_phy_driver_register(void)
{
	platform_driver_register(&dsi_phy_platform_driver);
}

void __exit msm_dsi_phy_driver_unregister(void)
{
	platform_driver_unregister(&dsi_phy_platform_driver);
}
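
/*
 * Power the PHY up: take the resource and regulator references, then call
 * the per-chipset enable hook; the goto labels unwind in reverse order on
 * failure.
 */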
int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
			struct msm_dsi_phy_clk_request *clk_req)
{
	struct device *dev;
	int ret;

	if (!phy || !phy->cfg->ops.enable)
		return -EINVAL;

	dev = &phy->pdev->dev;

	ret = dsi_phy_enable_resource(phy);
	if (ret) {
		dev_err(dev, "%s: resource enable failed, %d\n",
			__func__, ret);
		goto res_en_fail;
	}

	ret = dsi_phy_regulator_enable(phy);
	if (ret) {
		dev_err(dev, "%s: regulator enable failed, %d\n",
			__func__, ret);
		goto reg_en_fail;
	}

	ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req);
	if (ret) {
		dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
		goto phy_en_fail;
	}

	/*
	 * Resetting DSI PHY silently changes its PLL registers to reset status,
	 * which will confuse clock driver and result in wrong output rate of
	 * link clocks. Restore PLL status if its PLL is being used as clock
	 * source.
	 */
	if (phy->usecase != MSM_DSI_PHY_SLAVE) {
		ret = msm_dsi_pll_restore_state(phy->pll);
		if (ret) {
			dev_err(dev, "%s: failed to restore pll state, %d\n",
				__func__, ret);
			goto pll_restor_fail;
		}
	}

	return 0;

pll_restor_fail:
	if (phy->cfg->ops.disable)
		phy->cfg->ops.disable(phy);
phy_en_fail:
	dsi_phy_regulator_disable(phy);
reg_en_fail:
	dsi_phy_disable_resource(phy);
res_en_fail:
	return ret;
}

void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
{
	if (!phy || !phy->cfg->ops.disable)
		return;

	/* Save PLL status if it is a clock source */
	if (phy->usecase != MSM_DSI_PHY_SLAVE)
		msm_dsi_pll_save_state(phy->pll);

	phy->cfg->ops.disable(phy);

	dsi_phy_regulator_disable(phy);
	dsi_phy_disable_resource(phy);
}

void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
			struct msm_dsi_phy_shared_timings *shared_timings)
{
	memcpy(shared_timings, &phy->timing.shared_timings,
		sizeof(*shared_timings));
}

struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
{
	if (!phy)
		return NULL;

	return phy->pll;
}

void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
			enum msm_dsi_phy_usecase uc)
{
	if (phy)
		phy->usecase = uc;
}