/* dsi_phy_10nm.c */
  1. /*
  2. * SPDX-License-Identifier: GPL-2.0
  3. * Copyright (c) 2018, The Linux Foundation
  4. */
  5. #include <linux/iopoll.h>
  6. #include "dsi_phy.h"
  7. #include "dsi.xml.h"
  8. static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
  9. {
  10. void __iomem *base = phy->base;
  11. u32 data = 0;
  12. data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
  13. mb(); /* make sure read happened */
  14. return (data & BIT(0));
  15. }
  16. static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
  17. {
  18. void __iomem *lane_base = phy->lane_base;
  19. int phy_lane_0 = 0; /* TODO: Support all lane swap configs */
  20. /*
  21. * LPRX and CDRX need to enabled only for physical data lane
  22. * corresponding to the logical data lane 0
  23. */
  24. if (enable)
  25. dsi_phy_write(lane_base +
  26. REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
  27. else
  28. dsi_phy_write(lane_base +
  29. REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
  30. }
  31. static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
  32. {
  33. int i;
  34. u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
  35. void __iomem *lane_base = phy->lane_base;
  36. /* Strength ctrl settings */
  37. for (i = 0; i < 5; i++) {
  38. dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
  39. 0x55);
  40. /*
  41. * Disable LPRX and CDRX for all lanes. And later on, it will
  42. * be only enabled for the physical data lane corresponding
  43. * to the logical data lane 0
  44. */
  45. dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0);
  46. dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0);
  47. dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i),
  48. 0x88);
  49. }
  50. dsi_phy_hw_v3_0_config_lpcdrx(phy, true);
  51. /* other settings */
  52. for (i = 0; i < 5; i++) {
  53. dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0);
  54. dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0);
  55. dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
  56. dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
  57. i == 4 ? 0x80 : 0x0);
  58. dsi_phy_write(lane_base +
  59. REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i), 0x0);
  60. dsi_phy_write(lane_base +
  61. REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i), 0x0);
  62. dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
  63. tx_dctrl[i]);
  64. }
  65. /* Toggle BIT 0 to release freeze I/0 */
  66. dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
  67. dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
  68. }
/*
 * Bring up the 10nm DSI PHY: compute D-PHY timings for the requested
 * clock, wait for the reference generator, power up the analog blocks
 * and lanes, program the timing registers, set the PLL use case, and
 * finally apply the per-lane settings.
 *
 * Returns 0 on success, -EINVAL on timing-calculation or refgen-poll
 * failure, or the error from msm_dsi_pll_set_usecase().
 *
 * NOTE(review): the register writes below follow a hardware-mandated
 * ordering (resets asserted before clock selection, power-down removed
 * before lane power-up) — do not reorder.
 */
static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
			       struct msm_dsi_phy_clk_request *clk_req)
{
	int ret;
	u32 status;
	/* refgen poll: 5 us between reads, 1 ms total budget */
	u32 const delay_us = 5;
	u32 const timeout_us = 1000;
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	void __iomem *base = phy->base;
	u32 data;

	DBG("");

	if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
		dev_err(&phy->pdev->dev,
			"%s: D-PHY timing calculation failed\n", __func__);
		return -EINVAL;
	}

	/* Warn (but proceed) if the PLL was left running by the bootloader
	 * or a previous enable — TODO confirm this is benign. */
	if (dsi_phy_hw_v3_0_is_pll_on(phy))
		pr_warn("PLL turned on before configuring PHY\n");

	/* wait for REFGEN READY */
	ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS,
					status, (status & BIT(0)),
					delay_us, timeout_us);
	if (ret) {
		pr_err("Ref gen not ready. Aborting\n");
		return -EINVAL;
	}

	/* de-assert digital and pll power down */
	data = BIT(6) | BIT(5);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);

	/* Assert PLL core reset */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00);

	/* turn off resync FIFO */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00);

	/* Select MS1 byte-clk */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);

	/* Enable LDO */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL, 0x59);

	/* Configure PHY lane swap (TODO: we need to calculate this) */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84);

	/* DSI PHY timings */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0,
		      timing->hs_halfbyte_en);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1,
		      timing->clk_zero);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2,
		      timing->clk_prepare);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3,
		      timing->clk_trail);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4,
		      timing->hs_exit);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5,
		      timing->hs_zero);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6,
		      timing->hs_prepare);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7,
		      timing->hs_trail);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8,
		      timing->hs_rqst);
	/* CTRL_9 packs ta_go (bits 2:0) with ta_sure shifted into bits 5:3 */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9,
		      timing->ta_go | (timing->ta_sure << 3));
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10,
		      timing->ta_get);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11,
		      0x00);

	/* Remove power down from all blocks */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f);

	/* power up lanes */
	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	/* TODO: only power up lanes that are used */
	data |= 0x1F;
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F);

	/* Select full-rate mode */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);

	ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
	if (ret) {
		dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
			__func__, ret);
		return ret;
	}

	/* DSI lane settings */
	dsi_phy_hw_v3_0_lane_settings(phy);

	DBG("DSI%d PHY enabled", phy->id);

	return 0;
}
/*
 * Intentionally empty: no 10nm-specific teardown is performed here.
 * NOTE(review): presumably power-down is handled by the common PHY
 * layer / regulators — confirm against the callers of .disable.
 */
static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
{
}
  158. static int dsi_10nm_phy_init(struct msm_dsi_phy *phy)
  159. {
  160. struct platform_device *pdev = phy->pdev;
  161. phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
  162. "DSI_PHY_LANE");
  163. if (IS_ERR(phy->lane_base)) {
  164. dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
  165. __func__);
  166. return -ENOMEM;
  167. }
  168. return 0;
  169. }
/*
 * Configuration table for the 10nm DSI PHY, exported to the common
 * msm_dsi_phy layer (two PHY instances at the io_start addresses below).
 */
const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
	.type = MSM_DSI_PHY_10NM,
	/* NOTE(review): presumably indexed [phy_id][src_pll_id] — confirm
	 * against how the common layer consumes src_pll_truthtable. */
	.src_pll_truthtable = { {false, false}, {true, false} },
	.reg_cfg = {
		.num = 1,
		.regs = {
			/* supply name plus two load values — units follow the
			 * reg_cfg convention (likely uA); TODO confirm. */
			{"vdds", 36000, 32},
		},
	},
	.ops = {
		.enable = dsi_10nm_phy_enable,
		.disable = dsi_10nm_phy_disable,
		.init = dsi_10nm_phy_init,
	},
	/* Register base addresses of the two PHY instances. */
	.io_start = { 0xae94400, 0xae96400 },
	.num_dsi_phy = 2,
};