/* intel_dpio_phy.c */
  1. /*
  2. * Copyright © 2014-2016 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21. * DEALINGS IN THE SOFTWARE.
  22. */
  23. #include "intel_drv.h"
  24. /**
  25. * DOC: DPIO
  26. *
  27. * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
  28. * ports. DPIO is the name given to such a display PHY. These PHYs
  29. * don't follow the standard programming model using direct MMIO
  30. * registers, and instead their registers must be accessed through IOSF
  31. * sideband. VLV has one such PHY for driving ports B and C, and CHV
  32. * adds another PHY for driving port D. Each PHY responds to specific
  33. * IOSF-SB port.
  34. *
  35. * Each display PHY is made up of one or two channels. Each channel
  36. * houses a common lane part which contains the PLL and other common
  37. * logic. CH0 common lane also contains the IOSF-SB logic for the
  38. * Common Register Interface (CRI) ie. the DPIO registers. CRI clock
  39. * must be running when any DPIO registers are accessed.
  40. *
  41. * In addition to having their own registers, the PHYs are also
  42. * controlled through some dedicated signals from the display
  43. * controller. These include PLL reference clock enable, PLL enable,
  44. * and CRI clock selection, for example.
  45. *
  46. * Each channel also has two splines (also called data lanes), and
  47. * each spline is made up of one Physical Access Coding Sub-Layer
  48. * (PCS) block and two TX lanes. So each channel has two PCS blocks
  49. * and four TX lanes. The TX lanes are used as DP lanes or TMDS
  50. * data/clock pairs depending on the output type.
  51. *
  52. * Additionally the PHY also contains an AUX lane with AUX blocks
  53. * for each channel. This is used for DP AUX communication, but
  54. * this fact isn't really relevant for the driver since AUX is
  55. * controlled from the display controller side. No DPIO registers
  56. * need to be accessed during AUX communication.
  57. *
  58. * Generally on VLV/CHV the common lane corresponds to the pipe and
  59. * the spline (PCS/TX) corresponds to the port.
  60. *
  61. * For dual channel PHY (VLV/CHV):
  62. *
  63. * pipe A == CMN/PLL/REF CH0
  64. *
  65. * pipe B == CMN/PLL/REF CH1
  66. *
  67. * port B == PCS/TX CH0
  68. *
  69. * port C == PCS/TX CH1
  70. *
  71. * This is especially important when we cross the streams
  72. * ie. drive port B with pipe B, or port C with pipe A.
  73. *
  74. * For single channel PHY (CHV):
  75. *
  76. * pipe C == CMN/PLL/REF CH0
  77. *
  78. * port D == PCS/TX CH0
  79. *
  80. * On BXT the entire PHY channel corresponds to the port. That means
  81. * the PLL is also now associated with the port rather than the pipe,
  82. * and so the clock needs to be routed to the appropriate transcoder.
  83. * Port A PLL is directly connected to transcoder EDP and port B/C
  84. * PLLs can be routed to any transcoder A/B/C.
  85. *
  86. * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
  87. * digital port D (CHV) or port A (BXT). ::
  88. *
  89. *
  90. * Dual channel PHY (VLV/CHV/BXT)
  91. * ---------------------------------
  92. * | CH0 | CH1 |
  93. * | CMN/PLL/REF | CMN/PLL/REF |
  94. * |---------------|---------------| Display PHY
  95. * | PCS01 | PCS23 | PCS01 | PCS23 |
  96. * |-------|-------|-------|-------|
  97. * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
  98. * ---------------------------------
  99. * | DDI0 | DDI1 | DP/HDMI ports
  100. * ---------------------------------
  101. *
  102. * Single channel PHY (CHV/BXT)
  103. * -----------------
  104. * | CH0 |
  105. * | CMN/PLL/REF |
  106. * |---------------| Display PHY
  107. * | PCS01 | PCS23 |
  108. * |-------|-------|
  109. * |TX0|TX1|TX2|TX3|
  110. * -----------------
  111. * | DDI2 | DP/HDMI port
  112. * -----------------
  113. */
  114. /**
  115. * struct bxt_ddi_phy_info - Hold info for a broxton DDI phy
  116. */
  117. struct bxt_ddi_phy_info {
  118. /**
  119. * @dual_channel: true if this phy has a second channel.
  120. */
  121. bool dual_channel;
  122. /**
  123. * @rcomp_phy: If -1, indicates this phy has its own rcomp resistor.
  124. * Otherwise the GRC value will be copied from the phy indicated by
  125. * this field.
  126. */
  127. enum dpio_phy rcomp_phy;
  128. /**
  129. * @channel: struct containing per channel information.
  130. */
  131. struct {
  132. /**
  133. * @port: which port maps to this channel.
  134. */
  135. enum port port;
  136. } channel[2];
  137. };
/*
 * Static description of the two BXT DDI PHYs: PHY0 drives ports B/C on its
 * two channels; PHY1 drives port A on a single channel and owns the only
 * RCOMP resistor (PHY0 copies its GRC calibration from PHY1).
 */
static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
	[DPIO_PHY0] = {
		.dual_channel = true,
		/* No own resistor: GRC value is copied from PHY1. */
		.rcomp_phy = DPIO_PHY1,

		.channel = {
			[DPIO_CH0] = { .port = PORT_B },
			[DPIO_CH1] = { .port = PORT_C },
		}
	},
	[DPIO_PHY1] = {
		.dual_channel = false,
		/* -1: this PHY has its own RCOMP resistor. */
		.rcomp_phy = -1,

		.channel = {
			[DPIO_CH0] = { .port = PORT_A },
		}
	},
};
  155. static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
  156. {
  157. return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
  158. BIT(phy_info->channel[DPIO_CH0].port);
  159. }
  160. void bxt_port_to_phy_channel(enum port port,
  161. enum dpio_phy *phy, enum dpio_channel *ch)
  162. {
  163. const struct bxt_ddi_phy_info *phy_info;
  164. int i;
  165. for (i = 0; i < ARRAY_SIZE(bxt_ddi_phy_info); i++) {
  166. phy_info = &bxt_ddi_phy_info[i];
  167. if (port == phy_info->channel[DPIO_CH0].port) {
  168. *phy = i;
  169. *ch = DPIO_CH0;
  170. return;
  171. }
  172. if (phy_info->dual_channel &&
  173. port == phy_info->channel[DPIO_CH1].port) {
  174. *phy = i;
  175. *ch = DPIO_CH1;
  176. return;
  177. }
  178. }
  179. WARN(1, "PHY not found for PORT %c", port_name(port));
  180. *phy = DPIO_PHY0;
  181. *ch = DPIO_CH0;
  182. }
/*
 * Program voltage swing / pre-emphasis parameters for a BXT DDI port.
 *
 * The sequence clears the swing-calculation init bits, programs margin,
 * transition scale, scaling method and de-emphasis, and finally re-asserts
 * the init bits to have the hardware recompute the swing.
 */
void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
				  enum port port, u32 margin, u32 scale,
				  u32 enable, u32 deemphasis)
{
	u32 val;
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(port, &phy, &ch);

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
	val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
	I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);

	/* Voltage swing margin and unique transition scale. */
	val = I915_READ(BXT_PORT_TX_DW2_LN0(phy, ch));
	val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
	val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
	I915_WRITE(BXT_PORT_TX_DW2_GRP(phy, ch), val);

	/* Scaling method; warn if disabling it while the unique-range
	 * method bit is still set, which looks like an invalid combination. */
	val = I915_READ(BXT_PORT_TX_DW3_LN0(phy, ch));
	val &= ~SCALE_DCOMP_METHOD;
	if (enable)
		val |= SCALE_DCOMP_METHOD;

	if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
		DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");

	I915_WRITE(BXT_PORT_TX_DW3_GRP(phy, ch), val);

	/* De-emphasis level. */
	val = I915_READ(BXT_PORT_TX_DW4_LN0(phy, ch));
	val &= ~DE_EMPHASIS;
	val |= deemphasis << DEEMPH_SHIFT;
	I915_WRITE(BXT_PORT_TX_DW4_GRP(phy, ch), val);

	/* Start the swing calculation with the new settings. */
	val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
	val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
	I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
}
/*
 * Check whether a BXT DDI PHY is fully powered up and out of reset.
 *
 * Returns false if any of these fail: display power-on request, PHY
 * power-good status, GRC calibration done (only for the PHY that owns the
 * RCOMP resistor, i.e. rcomp_phy == -1), common-lane reset deassertion,
 * or a powered-down common lane for any port on this PHY.
 */
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
			    enum dpio_phy phy)
{
	const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
	enum port port;

	/* Display power-on request not even asserted? */
	if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
		return false;

	/* Power asserted but the PHY hasn't reached power-good yet. */
	if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
	     (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
		DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
				 phy);

		return false;
	}

	/* Only the PHY with its own RCOMP resistor performs GRC calibration. */
	if (phy_info->rcomp_phy == -1 &&
	    !(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE)) {
		DRM_DEBUG_DRIVER("DDI PHY %d powered, but GRC isn't done\n",
				 phy);

		return false;
	}

	if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
		DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
				 phy);

		return false;
	}

	/* Every port on this PHY must have its common lane powered up. */
	for_each_port_masked(port, bxt_phy_port_mask(phy_info)) {
		u32 tmp = I915_READ(BXT_PHY_CTL(port));

		if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
			DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
					 "for port %c powered down "
					 "(PHY_CTL %08x)\n",
					 phy, port_name(port), tmp);

			return false;
		}
	}

	return true;
}
  253. static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
  254. {
  255. u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
  256. return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
  257. }
/*
 * Wait (up to 10ms) for GRC calibration to complete on the given PHY;
 * logs an error on timeout but does not propagate it.
 */
static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
				  enum dpio_phy phy)
{
	if (intel_wait_for_register(dev_priv,
				    BXT_PORT_REF_DW3(phy),
				    GRC_DONE, GRC_DONE,
				    10))
		DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
}
/*
 * Low-level bring-up of a single BXT DDI PHY: power it on, program the
 * Rcomp offsets and power gating, copy/disable GRC calibration as needed,
 * and release the common-lane reset.
 *
 * If the PHY already looks enabled and its programmed state verifies, this
 * is a no-op (apart from caching the GRC value for state checks).
 */
static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
			      enum dpio_phy phy)
{
	const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
	u32 val;

	if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
		/* Still read out the GRC value for state verification */
		if (phy_info->rcomp_phy != -1)
			dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);

		if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
			DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
					 "won't reprogram it\n", phy);

			return;
		}

		DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
				 "force reprogramming it\n", phy);
	}

	/* Request display power for this PHY. */
	val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
	val |= GT_DISPLAY_POWER_ON(phy);
	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);

	/*
	 * The PHY registers start out inaccessible and respond to reads with
	 * all 1s. Eventually they become accessible as they power up, then
	 * the reserved bit will give the default 0. Poll on the reserved bit
	 * becoming 0 to find when the PHY is accessible.
	 * HW team confirmed that the time to reach phypowergood status is
	 * anywhere between 50 us and 100us.
	 */
	if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
		(PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
		DRM_ERROR("timeout during PHY%d power on\n", phy);
	}

	/* Program PLL Rcomp code offset */
	val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
	val &= ~IREF0RC_OFFSET_MASK;
	val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
	I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);

	val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
	val &= ~IREF1RC_OFFSET_MASK;
	val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
	I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);

	/* Program power gating */
	val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
	val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
		SUS_CLK_CONFIG;
	I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);

	/* Second channel needs its own power-gating enable bit. */
	if (phy_info->dual_channel) {
		val = I915_READ(BXT_PORT_CL2CM_DW6(phy));
		val |= DW6_OLDO_DYN_PWR_DOWN_EN;
		I915_WRITE(BXT_PORT_CL2CM_DW6(phy), val);
	}

	if (phy_info->rcomp_phy != -1) {
		uint32_t grc_code;
		/*
		 * PHY0 isn't connected to an RCOMP resistor so copy over
		 * the corresponding calibrated value from PHY1, and disable
		 * the automatic calibration on PHY0.
		 */
		val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
							  phy_info->rcomp_phy);
		grc_code = val << GRC_CODE_FAST_SHIFT |
			   val << GRC_CODE_SLOW_SHIFT |
			   val;
		I915_WRITE(BXT_PORT_REF_DW6(phy), grc_code);

		val = I915_READ(BXT_PORT_REF_DW8(phy));
		val |= GRC_DIS | GRC_RDY_OVRD;
		I915_WRITE(BXT_PORT_REF_DW8(phy), val);
	}

	/* Release the common-lane reset; the PHY starts operating now. */
	val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
	val |= COMMON_RESET_DIS;
	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);

	/* Only the RCOMP-owning PHY runs its own GRC calibration. */
	if (phy_info->rcomp_phy == -1)
		bxt_phy_wait_grc_done(dev_priv, phy);
}
  341. void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
  342. {
  343. uint32_t val;
  344. val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
  345. val &= ~COMMON_RESET_DIS;
  346. I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
  347. val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
  348. val &= ~GT_DISPLAY_POWER_ON(phy);
  349. I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
  350. }
  351. void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
  352. {
  353. const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
  354. enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
  355. bool was_enabled;
  356. lockdep_assert_held(&dev_priv->power_domains.lock);
  357. if (rcomp_phy != -1) {
  358. was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
  359. /*
  360. * We need to copy the GRC calibration value from rcomp_phy,
  361. * so make sure it's powered up.
  362. */
  363. if (!was_enabled)
  364. _bxt_ddi_phy_init(dev_priv, rcomp_phy);
  365. }
  366. _bxt_ddi_phy_init(dev_priv, phy);
  367. if (rcomp_phy != -1 && !was_enabled)
  368. bxt_ddi_phy_uninit(dev_priv, phy_info->rcomp_phy);
  369. }
/*
 * Compare a PHY register's masked value against the expected one.
 *
 * Returns true on match. On mismatch, logs a debug message using the
 * printf-style @reg_fmt (and varargs) to name the register, along with
 * current and expected values.
 */
static bool __printf(6, 7)
__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
		       i915_reg_t reg, u32 mask, u32 expected,
		       const char *reg_fmt, ...)
{
	struct va_format vaf;
	va_list args;
	u32 val;

	val = I915_READ(reg);
	if ((val & mask) == expected)
		return true;

	/* Format the register name lazily via %pV only on mismatch. */
	va_start(args, reg_fmt);
	vaf.fmt = reg_fmt;
	vaf.va = &args;

	DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
			 "current %08x, expected %08x (mask %08x)\n",
			 phy, &vaf, reg.reg, val, (val & ~mask) | expected,
			 mask);

	va_end(args);

	return false;
}
/*
 * Verify that a supposedly-enabled PHY still carries the register state
 * programmed by _bxt_ddi_phy_init(): Rcomp offsets, power gating bits and
 * (for a PHY without its own RCOMP resistor) the copied GRC code.
 *
 * Returns true when everything matches; each mismatch is logged at debug
 * level by __phy_reg_verify_state().
 */
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
			      enum dpio_phy phy)
{
	const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
	uint32_t mask;
	bool ok;

#define _CHK(reg, mask, exp, fmt, ...) \
	__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
			       ## __VA_ARGS__)

	if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
		return false;

	ok = true;

	/* PLL Rcomp code offset */
	ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
		   IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
		   "BXT_PORT_CL1CM_DW9(%d)", phy);
	ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
		   IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
		   "BXT_PORT_CL1CM_DW10(%d)", phy);

	/* Power gating */
	mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
	ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
		   "BXT_PORT_CL1CM_DW28(%d)", phy);

	if (phy_info->dual_channel)
		ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
			   DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
			   "BXT_PORT_CL2CM_DW6(%d)", phy);

	if (phy_info->rcomp_phy != -1) {
		u32 grc_code = dev_priv->bxt_phy_grc;

		/* The cached GRC code must appear in all three fields. */
		grc_code = grc_code << GRC_CODE_FAST_SHIFT |
			   grc_code << GRC_CODE_SLOW_SHIFT |
			   grc_code;
		mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
		       GRC_CODE_NOM_MASK;
		ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
			   "BXT_PORT_REF_DW6(%d)", phy);

		mask = GRC_DIS | GRC_RDY_OVRD;
		ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
			   "BXT_PORT_REF_DW8(%d)", phy);
	}

	return ok;
#undef _CHK
}
  434. uint8_t
  435. bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
  436. uint8_t lane_count)
  437. {
  438. switch (lane_count) {
  439. case 1:
  440. return 0;
  441. case 2:
  442. return BIT(2) | BIT(0);
  443. case 4:
  444. return BIT(3) | BIT(2) | BIT(0);
  445. default:
  446. MISSING_CASE(lane_count);
  447. return 0;
  448. }
  449. }
  450. void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
  451. uint8_t lane_lat_optim_mask)
  452. {
  453. struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  454. struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
  455. enum port port = dport->port;
  456. enum dpio_phy phy;
  457. enum dpio_channel ch;
  458. int lane;
  459. bxt_port_to_phy_channel(port, &phy, &ch);
  460. for (lane = 0; lane < 4; lane++) {
  461. u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
  462. /*
  463. * Note that on CHV this flag is called UPAR, but has
  464. * the same function.
  465. */
  466. val &= ~LATENCY_OPTIM;
  467. if (lane_lat_optim_mask & BIT(lane))
  468. val |= LATENCY_OPTIM;
  469. I915_WRITE(BXT_PORT_TX_DW14_LN(phy, ch, lane), val);
  470. }
  471. }
  472. uint8_t
  473. bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
  474. {
  475. struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  476. struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
  477. enum port port = dport->port;
  478. enum dpio_phy phy;
  479. enum dpio_channel ch;
  480. int lane;
  481. uint8_t mask;
  482. bxt_port_to_phy_channel(port, &phy, &ch);
  483. mask = 0;
  484. for (lane = 0; lane < 4; lane++) {
  485. u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
  486. if (val & LATENCY_OPTIM)
  487. mask |= BIT(lane);
  488. }
  489. return mask;
  490. }
/*
 * Program voltage swing and de-emphasis on a CHV PHY via IOSF sideband.
 *
 * Sequence: clear the swing-calc init bits, program margin/de-emphasis
 * values per lane, optionally enable the unique transition scale, then
 * re-assert the init bits to start the calculation. PCS23 registers are
 * touched only when more than two lanes are in use.
 */
void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 deemph_reg_value, u32 margin_reg_value,
			      bool uniq_trans_scale)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;
	int i;

	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	/* Reset the PCS margin fields to the 000 level. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (uniq_trans_scale)
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
/*
 * Assert (@reset == true) or deassert the CHV data-lane soft reset for
 * the encoder's PHY channel.
 *
 * Touches the TX lane reset bits in PCS_DW0 and the clock soft reset in
 * PCS_DW1; PCS23 is included only when more than two lanes are in use.
 * NOTE(review): caller appears to be expected to hold sb_lock — the
 * callers in this file take it around this call; confirm before reuse.
 */
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
			      bool reset)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	enum pipe pipe = crtc->pipe;
	uint32_t val;

	/* TX lane 1/2 reset bits. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	if (reset)
		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	else
		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
		if (reset)
			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
		else
			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
	}

	/* PCS clock soft reset, with the softreset-enable bit always set. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	if (reset)
		val &= ~DPIO_PCS_CLK_SOFT_RESET;
	else
		val |= DPIO_PCS_CLK_SOFT_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
		val |= CHV_PCS_REQ_SOFTRESET_EN;
		if (reset)
			val &= ~DPIO_PCS_CLK_SOFT_RESET;
		else
			val |= DPIO_PCS_CLK_SOFT_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
	}
}
/*
 * Pre-PLL-enable setup for a CHV PHY: power up the lanes, assert the data
 * lane reset, and program clock distribution and clock channel usage for
 * the pipe/port combination.
 */
void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	unsigned int lane_mask =
		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
	u32 val;

	/*
	 * Must trick the second common lane into life.
	 * Otherwise we can't even access the PLL.
	 */
	if (ch == DPIO_CH0 && pipe == PIPE_B)
		dport->release_cl2_override =
			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

	chv_phy_powergate_lanes(encoder, true, lane_mask);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
		if (pipe != PIPE_B)
			val &= ~CHV_PCS_USEDCLKCHANNEL;
		else
			val |= CHV_PCS_USEDCLKCHANNEL;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
	}

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
/*
 * Pre-encoder-enable setup for a CHV PHY: hand TX FIFO reset control to
 * hardware, program per-lane latency (UPAR) settings, configure data lane
 * stagger based on port clock, then deassert the data lane reset.
 */
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i, stagger;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	/* Program Tx lane latency optimal setting */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		/* Set the upar bit (cleared only on lane 1 for multi-lane). */
		if (intel_crtc->config->lane_count == 1)
			data = 0x0;
		else
			data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
			       data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming */
	if (intel_crtc->config->port_clock > 270000)
		stagger = 0x18;
	else if (intel_crtc->config->port_clock > 135000)
		stagger = 0xd;
	else if (intel_crtc->config->port_clock > 67500)
		stagger = 0x7;
	else if (intel_crtc->config->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val |= DPIO_TX2_STAGGER_MASK(0x1f);
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	if (intel_crtc->config->lane_count > 2) {
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
			       DPIO_LANESTAGGER_STRAP(stagger) |
			       DPIO_LANESTAGGER_STRAP_OVRD |
			       DPIO_TX1_STAGGER_MASK(0x1f) |
			       DPIO_TX1_STAGGER_MULT(7) |
			       DPIO_TX2_STAGGER_MULT(5));
	}

	/* Deassert data lane reset */
	chv_data_lane_soft_reset(encoder, false);

	mutex_unlock(&dev_priv->sb_lock);
}
  751. void chv_phy_release_cl2_override(struct intel_encoder *encoder)
  752. {
  753. struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  754. struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
  755. if (dport->release_cl2_override) {
  756. chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
  757. dport->release_cl2_override = false;
  758. }
  759. }
/*
 * Tear down the PHY clock distribution after the PLL is disabled:
 * clear the left/right clock buffer enables for the CMN register that
 * belongs to this pipe, then drop the lane power-down overrides.
 */
void chv_phy_post_pll_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		/* pipes A/C use the BUF*ENA1 bits in CMN_DW5 of channel 0 */
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		/* pipe B uses the BUF*ENA2 bits in CMN_DW1 of channel 1 */
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Leave the power down bit cleared for at least one
	 * lane so that chv_powergate_phy_ch() will power
	 * on something when the channel is otherwise unused.
	 * When the port is off and the override is removed
	 * the lanes power down anyway, so otherwise it doesn't
	 * really matter what the state of power down bits is
	 * after this.
	 */
	chv_phy_powergate_lanes(encoder, false, 0x0);
}
  788. void vlv_set_phy_signal_level(struct intel_encoder *encoder,
  789. u32 demph_reg_value, u32 preemph_reg_value,
  790. u32 uniqtranscale_reg_value, u32 tx3_demph)
  791. {
  792. struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
  793. struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
  794. struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  795. enum dpio_channel port = vlv_dport_to_channel(dport);
  796. int pipe = intel_crtc->pipe;
  797. mutex_lock(&dev_priv->sb_lock);
  798. vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
  799. vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
  800. vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
  801. uniqtranscale_reg_value);
  802. vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
  803. if (tx3_demph)
  804. vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
  805. vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
  806. vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
  807. vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
  808. mutex_unlock(&dev_priv->sb_lock);
  809. }
  810. void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
  811. {
  812. struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  813. struct drm_device *dev = encoder->base.dev;
  814. struct drm_i915_private *dev_priv = to_i915(dev);
  815. struct intel_crtc *intel_crtc =
  816. to_intel_crtc(encoder->base.crtc);
  817. enum dpio_channel port = vlv_dport_to_channel(dport);
  818. int pipe = intel_crtc->pipe;
  819. /* Program Tx lane resets to default */
  820. mutex_lock(&dev_priv->sb_lock);
  821. vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
  822. DPIO_PCS_TX_LANE2_RESET |
  823. DPIO_PCS_TX_LANE1_RESET);
  824. vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
  825. DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
  826. DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
  827. (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
  828. DPIO_PCS_CLK_SOFT_RESET);
  829. /* Fix up inter-pair skew failure */
  830. vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
  831. vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
  832. vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
  833. mutex_unlock(&dev_priv->sb_lock);
  834. }
/*
 * Enable the clock channels and program the lane clocks for a VLV DPIO
 * PHY port just before the encoder is enabled.
 */
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* Enable clock channels for this port */
	/*
	 * NOTE(review): the value read here is immediately discarded by the
	 * "val = 0" on the next line, which in turn makes the else-branch
	 * "val &= ~(1<<21)" a no-op. Presumably the read is vestigial (or a
	 * deliberate dummy sideband access) -- confirm before cleaning up.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);	/* bit 21 set only for pipe B -- meaning TODO confirm */
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);

	/* Program lane clock */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->sb_lock);
}
  860. void vlv_phy_reset_lanes(struct intel_encoder *encoder)
  861. {
  862. struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  863. struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
  864. struct intel_crtc *intel_crtc =
  865. to_intel_crtc(encoder->base.crtc);
  866. enum dpio_channel port = vlv_dport_to_channel(dport);
  867. int pipe = intel_crtc->pipe;
  868. mutex_lock(&dev_priv->sb_lock);
  869. vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
  870. vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
  871. mutex_unlock(&dev_priv->sb_lock);
  872. }