intel_dpio_phy.c

/*
 * Copyright © 2014-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "intel_drv.h"

/**
 * DOC: DPIO
 *
 * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
 * ports. DPIO is the name given to such a display PHY. These PHYs
 * don't follow the standard programming model using direct MMIO
 * registers, and instead their registers must be accessed through IOSF
 * sideband. VLV has one such PHY for driving ports B and C, and CHV
 * adds another PHY for driving port D. Each PHY responds to a specific
 * IOSF-SB port.
 *
 * Each display PHY is made up of one or two channels. Each channel
 * houses a common lane part which contains the PLL and other common
 * logic. CH0 common lane also contains the IOSF-SB logic for the
 * Common Register Interface (CRI) ie. the DPIO registers. CRI clock
 * must be running when any DPIO registers are accessed.
 *
 * In addition to having their own registers, the PHYs are also
 * controlled through some dedicated signals from the display
 * controller. These include PLL reference clock enable, PLL enable,
 * and CRI clock selection, for example.
 *
 * Each channel also has two splines (also called data lanes), and
 * each spline is made up of one Physical Access Coding Sub-Layer
 * (PCS) block and two TX lanes. So each channel has two PCS blocks
 * and four TX lanes. The TX lanes are used as DP lanes or TMDS
 * data/clock pairs depending on the output type.
 *
 * Additionally the PHY also contains an AUX lane with AUX blocks
 * for each channel. This is used for DP AUX communication, but
 * this fact isn't really relevant for the driver since AUX is
 * controlled from the display controller side. No DPIO registers
 * need to be accessed during AUX communication.
 *
 * Generally on VLV/CHV the common lane corresponds to the pipe and
 * the spline (PCS/TX) corresponds to the port.
 *
 * For dual channel PHY (VLV/CHV):
 *
 *  pipe A == CMN/PLL/REF CH0
 *
 *  pipe B == CMN/PLL/REF CH1
 *
 *  port B == PCS/TX CH0
 *
 *  port C == PCS/TX CH1
 *
 * This is especially important when we cross the streams
 * ie. drive port B with pipe B, or port C with pipe A.
 *
 * For single channel PHY (CHV):
 *
 *  pipe C == CMN/PLL/REF CH0
 *
 *  port D == PCS/TX CH0
 *
 * On BXT the entire PHY channel corresponds to the port. That means
 * the PLL is also now associated with the port rather than the pipe,
 * and so the clock needs to be routed to the appropriate transcoder.
 * Port A PLL is directly connected to transcoder EDP and port B/C
 * PLLs can be routed to any transcoder A/B/C.
 *
 * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
 * digital port D (CHV) or port A (BXT). ::
 *
 *
 *     Dual channel PHY (VLV/CHV/BXT)
 *     ---------------------------------
 *     |      CH0      |      CH1      |
 *     |  CMN/PLL/REF  |  CMN/PLL/REF  |
 *     |---------------|---------------|  Display PHY
 *     | PCS01 | PCS23 | PCS01 | PCS23 |
 *     |-------|-------|-------|-------|
 *     |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
 *     ---------------------------------
 *     |     DDI0      |     DDI1      |  DP/HDMI ports
 *     ---------------------------------
 *
 *     Single channel PHY (CHV/BXT)
 *     -----------------
 *     |      CH0      |
 *     |  CMN/PLL/REF  |
 *     |---------------|  Display PHY
 *     | PCS01 | PCS23 |
 *     |-------|-------|
 *     |TX0|TX1|TX2|TX3|
 *     -----------------
 *     |     DDI2      |  DP/HDMI port
 *     -----------------
 */
/**
 * struct bxt_ddi_phy_info - Holds info for a Broxton DDI PHY
 */
  117. struct bxt_ddi_phy_info {
  118. /**
  119. * @dual_channel: true if this phy has a second channel.
  120. */
  121. bool dual_channel;
  122. /**
  123. * @rcomp_phy: If -1, indicates this phy has its own rcomp resistor.
  124. * Otherwise the GRC value will be copied from the phy indicated by
  125. * this field.
  126. */
  127. enum dpio_phy rcomp_phy;
  128. /**
  129. * @reset_delay: delay in us to wait before setting the common reset
  130. * bit in BXT_PHY_CTL_FAMILY, which effectively enables the phy.
  131. */
  132. int reset_delay;
  133. /**
  134. * @pwron_mask: Mask with the appropriate bit set that would cause the
  135. * punit to power this phy if written to BXT_P_CR_GT_DISP_PWRON.
  136. */
  137. u32 pwron_mask;
  138. /**
  139. * @channel: struct containing per channel information.
  140. */
  141. struct {
  142. /**
  143. * @port: which port maps to this channel.
  144. */
  145. enum port port;
  146. } channel[2];
  147. };
  148. static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
  149. [DPIO_PHY0] = {
  150. .dual_channel = true,
  151. .rcomp_phy = DPIO_PHY1,
  152. .pwron_mask = BIT(0),
  153. .channel = {
  154. [DPIO_CH0] = { .port = PORT_B },
  155. [DPIO_CH1] = { .port = PORT_C },
  156. }
  157. },
  158. [DPIO_PHY1] = {
  159. .dual_channel = false,
  160. .rcomp_phy = -1,
  161. .pwron_mask = BIT(1),
  162. .channel = {
  163. [DPIO_CH0] = { .port = PORT_A },
  164. }
  165. },
  166. };
  167. static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
  168. [DPIO_PHY0] = {
  169. .dual_channel = false,
  170. .rcomp_phy = DPIO_PHY1,
  171. .pwron_mask = BIT(0),
  172. .reset_delay = 20,
  173. .channel = {
  174. [DPIO_CH0] = { .port = PORT_B },
  175. }
  176. },
  177. [DPIO_PHY1] = {
  178. .dual_channel = false,
  179. .rcomp_phy = -1,
  180. .pwron_mask = BIT(3),
  181. .reset_delay = 20,
  182. .channel = {
  183. [DPIO_CH0] = { .port = PORT_A },
  184. }
  185. },
  186. [DPIO_PHY2] = {
  187. .dual_channel = false,
  188. .rcomp_phy = DPIO_PHY1,
  189. .pwron_mask = BIT(1),
  190. .reset_delay = 20,
  191. .channel = {
  192. [DPIO_CH0] = { .port = PORT_C },
  193. }
  194. },
  195. };
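
/*
 * Return a bitmask of the ports driven by the given PHY: channel 0's port is
 * always included, channel 1's port only on dual channel PHYs.
 */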
  196. static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
  197. {
  198. return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
  199. BIT(phy_info->channel[DPIO_CH0].port);
  200. }
  201. static const struct bxt_ddi_phy_info *
  202. bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
  203. {
  204. if (IS_GEMINILAKE(dev_priv)) {
  205. *count = ARRAY_SIZE(glk_ddi_phy_info);
  206. return glk_ddi_phy_info;
  207. } else {
  208. *count = ARRAY_SIZE(bxt_ddi_phy_info);
  209. return bxt_ddi_phy_info;
  210. }
  211. }
  212. static const struct bxt_ddi_phy_info *
  213. bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
  214. {
  215. int count;
  216. const struct bxt_ddi_phy_info *phy_list =
  217. bxt_get_phy_list(dev_priv, &count);
  218. return &phy_list[phy];
  219. }
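
/*
 * Map @port to its PHY/channel pair by walking the platform's PHY list. If no
 * match is found, a WARN is emitted and PHY0/CH0 is returned as a fallback.
 *
 * Typical use within this file (illustrative):
 *
 *	enum dpio_phy phy;
 *	enum dpio_channel ch;
 *
 *	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
 */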
  220. void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
  221. enum dpio_phy *phy, enum dpio_channel *ch)
  222. {
  223. const struct bxt_ddi_phy_info *phy_info, *phys;
  224. int i, count;
  225. phys = bxt_get_phy_list(dev_priv, &count);
  226. for (i = 0; i < count; i++) {
  227. phy_info = &phys[i];
  228. if (port == phy_info->channel[DPIO_CH0].port) {
  229. *phy = i;
  230. *ch = DPIO_CH0;
  231. return;
  232. }
  233. if (phy_info->dual_channel &&
  234. port == phy_info->channel[DPIO_CH1].port) {
  235. *phy = i;
  236. *ch = DPIO_CH1;
  237. return;
  238. }
  239. }
  240. WARN(1, "PHY not found for PORT %c", port_name(port));
  241. *phy = DPIO_PHY0;
  242. *ch = DPIO_CH0;
  243. }
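
/*
 * Program voltage swing, scaling and de-emphasis for @port. Writes go through
 * the group registers so that all lanes receive the same values, and the
 * swing calculation is held off via PCS_DW10 around the update. The values
 * are typically taken from the platform's DDI buffer translation tables
 * (a description of the usual call path, not something enforced here).
 */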
  244. void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
  245. enum port port, u32 margin, u32 scale,
  246. u32 enable, u32 deemphasis)
  247. {
  248. u32 val;
  249. enum dpio_phy phy;
  250. enum dpio_channel ch;
  251. bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
  252. /*
  253. * While we write to the group register to program all lanes at once we
  254. * can read only lane registers and we pick lanes 0/1 for that.
  255. */
  256. val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
  257. val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
  258. I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
  259. val = I915_READ(BXT_PORT_TX_DW2_LN0(phy, ch));
  260. val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
  261. val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
  262. I915_WRITE(BXT_PORT_TX_DW2_GRP(phy, ch), val);
  263. val = I915_READ(BXT_PORT_TX_DW3_LN0(phy, ch));
  264. val &= ~SCALE_DCOMP_METHOD;
  265. if (enable)
  266. val |= SCALE_DCOMP_METHOD;
	if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
		DRM_ERROR("Disabled scaling while UNIQUE_TRANGE_EN_METHOD was set");
  269. I915_WRITE(BXT_PORT_TX_DW3_GRP(phy, ch), val);
  270. val = I915_READ(BXT_PORT_TX_DW4_LN0(phy, ch));
  271. val &= ~DE_EMPHASIS;
  272. val |= deemphasis << DEEMPH_SHIFT;
  273. I915_WRITE(BXT_PORT_TX_DW4_GRP(phy, ch), val);
  274. val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
  275. val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
  276. I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
  277. }
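
/*
 * Check whether the PHY is powered up and usable: the punit power-on bit must
 * be set, the PHY power good status must have settled, the common reset must
 * be deasserted, and the common lane of every port on this PHY must not be
 * powered down.
 */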
  278. bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
  279. enum dpio_phy phy)
  280. {
  281. const struct bxt_ddi_phy_info *phy_info;
  282. enum port port;
  283. phy_info = bxt_get_phy_info(dev_priv, phy);
  284. if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
  285. return false;
  286. if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
  287. (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
  288. DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
  289. phy);
  290. return false;
  291. }
  292. if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
  293. DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
  294. phy);
  295. return false;
  296. }
  297. for_each_port_masked(port, bxt_phy_port_mask(phy_info)) {
  298. u32 tmp = I915_READ(BXT_PHY_CTL(port));
  299. if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
  300. DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
  301. "for port %c powered down "
  302. "(PHY_CTL %08x)\n",
  303. phy, port_name(port), tmp);
  304. return false;
  305. }
  306. }
  307. return true;
  308. }
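
/* Read back the resistor calibration (GRC) code from BXT_PORT_REF_DW6. */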
  309. static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
  310. {
  311. u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
  312. return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
  313. }
  314. static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
  315. enum dpio_phy phy)
  316. {
  317. if (intel_wait_for_register(dev_priv,
  318. BXT_PORT_REF_DW3(phy),
  319. GRC_DONE, GRC_DONE,
  320. 10))
  321. DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
  322. }
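
/*
 * Power on and initialize a single PHY: skip reprogramming if it is already
 * enabled with a valid state, otherwise power it on through the punit, wait
 * for power good, program the Rcomp code offsets and power gating, copy the
 * GRC code from the rcomp PHY where applicable, and finally release the
 * common reset.
 */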
  323. static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
  324. enum dpio_phy phy)
  325. {
  326. const struct bxt_ddi_phy_info *phy_info;
  327. u32 val;
  328. phy_info = bxt_get_phy_info(dev_priv, phy);
  329. if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
  330. /* Still read out the GRC value for state verification */
  331. if (phy_info->rcomp_phy != -1)
  332. dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
  333. if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
  334. DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
  335. "won't reprogram it\n", phy);
  336. return;
  337. }
  338. DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
  339. "force reprogramming it\n", phy);
  340. }
  341. val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
  342. val |= phy_info->pwron_mask;
  343. I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
  344. /*
  345. * The PHY registers start out inaccessible and respond to reads with
  346. * all 1s. Eventually they become accessible as they power up, then
  347. * the reserved bit will give the default 0. Poll on the reserved bit
  348. * becoming 0 to find when the PHY is accessible.
	 * HW team confirmed that the time to reach the phy power good status is
	 * anywhere between 50 us and 100 us.
  351. */
  352. if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
  353. (PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
  354. DRM_ERROR("timeout during PHY%d power on\n", phy);
  355. }
  356. /* Program PLL Rcomp code offset */
  357. val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
  358. val &= ~IREF0RC_OFFSET_MASK;
  359. val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
  360. I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
  361. val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
  362. val &= ~IREF1RC_OFFSET_MASK;
  363. val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
  364. I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
  365. /* Program power gating */
  366. val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
  367. val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
  368. SUS_CLK_CONFIG;
  369. I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
  370. if (phy_info->dual_channel) {
  371. val = I915_READ(BXT_PORT_CL2CM_DW6(phy));
  372. val |= DW6_OLDO_DYN_PWR_DOWN_EN;
  373. I915_WRITE(BXT_PORT_CL2CM_DW6(phy), val);
  374. }
  375. if (phy_info->rcomp_phy != -1) {
  376. uint32_t grc_code;
  377. bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy);
  378. /*
  379. * PHY0 isn't connected to an RCOMP resistor so copy over
  380. * the corresponding calibrated value from PHY1, and disable
  381. * the automatic calibration on PHY0.
  382. */
  383. val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
  384. phy_info->rcomp_phy);
  385. grc_code = val << GRC_CODE_FAST_SHIFT |
  386. val << GRC_CODE_SLOW_SHIFT |
  387. val;
  388. I915_WRITE(BXT_PORT_REF_DW6(phy), grc_code);
  389. val = I915_READ(BXT_PORT_REF_DW8(phy));
  390. val |= GRC_DIS | GRC_RDY_OVRD;
  391. I915_WRITE(BXT_PORT_REF_DW8(phy), val);
  392. }
  393. if (phy_info->reset_delay)
  394. udelay(phy_info->reset_delay);
  395. val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
  396. val |= COMMON_RESET_DIS;
  397. I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
  398. }
  399. void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
  400. {
  401. const struct bxt_ddi_phy_info *phy_info;
  402. uint32_t val;
  403. phy_info = bxt_get_phy_info(dev_priv, phy);
  404. val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
  405. val &= ~COMMON_RESET_DIS;
  406. I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
  407. val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
  408. val &= ~phy_info->pwron_mask;
  409. I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
  410. }
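
/*
 * Public entry point for powering up a PHY. If the PHY takes its GRC
 * calibration value from another PHY (rcomp_phy), that PHY is brought up
 * first and powered back down afterwards if it wasn't already enabled.
 *
 * Callers must hold the power domains lock; a minimal illustrative call
 * sequence (assuming the caller owns that lock management) would be:
 *
 *	mutex_lock(&dev_priv->power_domains.lock);
 *	bxt_ddi_phy_init(dev_priv, DPIO_PHY0);
 *	mutex_unlock(&dev_priv->power_domains.lock);
 */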
  411. void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
  412. {
  413. const struct bxt_ddi_phy_info *phy_info =
  414. bxt_get_phy_info(dev_priv, phy);
  415. enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
  416. bool was_enabled;
  417. lockdep_assert_held(&dev_priv->power_domains.lock);
  418. if (rcomp_phy != -1) {
  419. was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
  420. /*
  421. * We need to copy the GRC calibration value from rcomp_phy,
  422. * so make sure it's powered up.
  423. */
  424. if (!was_enabled)
  425. _bxt_ddi_phy_init(dev_priv, rcomp_phy);
  426. }
  427. _bxt_ddi_phy_init(dev_priv, phy);
  428. if (rcomp_phy != -1 && !was_enabled)
  429. bxt_ddi_phy_uninit(dev_priv, phy_info->rcomp_phy);
  430. }
  431. static bool __printf(6, 7)
  432. __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
  433. i915_reg_t reg, u32 mask, u32 expected,
  434. const char *reg_fmt, ...)
  435. {
  436. struct va_format vaf;
  437. va_list args;
  438. u32 val;
  439. val = I915_READ(reg);
  440. if ((val & mask) == expected)
  441. return true;
  442. va_start(args, reg_fmt);
  443. vaf.fmt = reg_fmt;
  444. vaf.va = &args;
  445. DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
  446. "current %08x, expected %08x (mask %08x)\n",
  447. phy, &vaf, reg.reg, val, (val & ~mask) | expected,
  448. mask);
  449. va_end(args);
  450. return false;
  451. }
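
/*
 * Verify that the PHY register state matches what _bxt_ddi_phy_init() would
 * have programmed, logging any mismatches at debug level. Returns false if
 * the PHY is disabled or needs to be reprogrammed.
 */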
  452. bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
  453. enum dpio_phy phy)
  454. {
  455. const struct bxt_ddi_phy_info *phy_info;
  456. uint32_t mask;
  457. bool ok;
  458. phy_info = bxt_get_phy_info(dev_priv, phy);
  459. #define _CHK(reg, mask, exp, fmt, ...) \
  460. __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
  461. ## __VA_ARGS__)
  462. if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
  463. return false;
  464. ok = true;
  465. /* PLL Rcomp code offset */
  466. ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
  467. IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
  468. "BXT_PORT_CL1CM_DW9(%d)", phy);
  469. ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
  470. IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
  471. "BXT_PORT_CL1CM_DW10(%d)", phy);
  472. /* Power gating */
  473. mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
  474. ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
  475. "BXT_PORT_CL1CM_DW28(%d)", phy);
  476. if (phy_info->dual_channel)
  477. ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
  478. DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
  479. "BXT_PORT_CL2CM_DW6(%d)", phy);
  480. if (phy_info->rcomp_phy != -1) {
  481. u32 grc_code = dev_priv->bxt_phy_grc;
  482. grc_code = grc_code << GRC_CODE_FAST_SHIFT |
  483. grc_code << GRC_CODE_SLOW_SHIFT |
  484. grc_code;
  485. mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
  486. GRC_CODE_NOM_MASK;
  487. ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
  488. "BXT_PORT_REF_DW6(%d)", phy);
  489. mask = GRC_DIS | GRC_RDY_OVRD;
  490. ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
  491. "BXT_PORT_REF_DW8(%d)", phy);
  492. }
  493. return ok;
  494. #undef _CHK
  495. }
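
/*
 * Compute which lanes need the latency optimization bit for the given lane
 * count: none for x1, lanes 0 and 2 for x2, and lanes 0, 2 and 3 for x4.
 */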
  496. uint8_t
  497. bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
  498. uint8_t lane_count)
  499. {
  500. switch (lane_count) {
  501. case 1:
  502. return 0;
  503. case 2:
  504. return BIT(2) | BIT(0);
  505. case 4:
  506. return BIT(3) | BIT(2) | BIT(0);
  507. default:
  508. MISSING_CASE(lane_count);
  509. return 0;
  510. }
  511. }
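
/*
 * Apply the lane latency optimization mask by updating LATENCY_OPTIM in
 * BXT_PORT_TX_DW14 for each of the four lanes of the port's PHY channel.
 */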
  512. void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
  513. uint8_t lane_lat_optim_mask)
  514. {
  515. struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  516. struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
  517. enum port port = dport->port;
  518. enum dpio_phy phy;
  519. enum dpio_channel ch;
  520. int lane;
  521. bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
  522. for (lane = 0; lane < 4; lane++) {
  523. u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
  524. /*
  525. * Note that on CHV this flag is called UPAR, but has
  526. * the same function.
  527. */
  528. val &= ~LATENCY_OPTIM;
  529. if (lane_lat_optim_mask & BIT(lane))
  530. val |= LATENCY_OPTIM;
  531. I915_WRITE(BXT_PORT_TX_DW14_LN(phy, ch, lane), val);
  532. }
  533. }
  534. uint8_t
  535. bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
  536. {
  537. struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  538. struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
  539. enum port port = dport->port;
  540. enum dpio_phy phy;
  541. enum dpio_channel ch;
  542. int lane;
  543. uint8_t mask;
  544. bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
  545. mask = 0;
  546. for (lane = 0; lane < 4; lane++) {
  547. u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
  548. if (val & LATENCY_OPTIM)
  549. mask |= BIT(lane);
  550. }
  551. return mask;
  552. }
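
/*
 * CHV signal level programming: clear the swing calculation init bits,
 * program de-emphasis and margin for each lane, optionally enable the unique
 * transition scale, then restart the swing calculation. All accesses go
 * through the IOSF sideband under sb_lock.
 */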
  553. void chv_set_phy_signal_level(struct intel_encoder *encoder,
  554. u32 deemph_reg_value, u32 margin_reg_value,
  555. bool uniq_trans_scale)
  556. {
  557. struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
  558. struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  559. struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
  560. enum dpio_channel ch = vlv_dport_to_channel(dport);
  561. enum pipe pipe = intel_crtc->pipe;
  562. u32 val;
  563. int i;
  564. mutex_lock(&dev_priv->sb_lock);
  565. /* Clear calc init */
  566. val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
  567. val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
  568. val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
  569. val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
  570. vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
  571. if (intel_crtc->config->lane_count > 2) {
  572. val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
  573. val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
  574. val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
  575. val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
  576. vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
  577. }
  578. val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
  579. val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
  580. val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
  581. vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
  582. if (intel_crtc->config->lane_count > 2) {
  583. val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
  584. val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
  585. val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
  586. vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
  587. }
  588. /* Program swing deemph */
  589. for (i = 0; i < intel_crtc->config->lane_count; i++) {
  590. val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
  591. val &= ~DPIO_SWING_DEEMPH9P5_MASK;
  592. val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
  593. vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
  594. }
  595. /* Program swing margin */
  596. for (i = 0; i < intel_crtc->config->lane_count; i++) {
  597. val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
  598. val &= ~DPIO_SWING_MARGIN000_MASK;
  599. val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
  600. /*
  601. * Supposedly this value shouldn't matter when unique transition
  602. * scale is disabled, but in fact it does matter. Let's just
  603. * always program the same value and hope it's OK.
  604. */
  605. val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
  606. val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
  607. vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
  608. }
  609. /*
  610. * The document said it needs to set bit 27 for ch0 and bit 26
  611. * for ch1. Might be a typo in the doc.
  612. * For now, for this unique transition scale selection, set bit
  613. * 27 for ch0 and ch1.
  614. */
  615. for (i = 0; i < intel_crtc->config->lane_count; i++) {
  616. val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
  617. if (uniq_trans_scale)
  618. val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
  619. else
  620. val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
  621. vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
  622. }
  623. /* Start swing calculation */
  624. val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
  625. val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
  626. vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
  627. if (intel_crtc->config->lane_count > 2) {
  628. val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
  629. val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
  630. vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
  631. }
  632. mutex_unlock(&dev_priv->sb_lock);
  633. }
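
/*
 * Assert or deassert the PCS data lane soft reset for both lane pairs of the
 * channel (the second pair only when more than two lanes are in use).
 */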
  634. void chv_data_lane_soft_reset(struct intel_encoder *encoder,
  635. bool reset)
  636. {
  637. struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
  638. enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
  639. struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
  640. enum pipe pipe = crtc->pipe;
  641. uint32_t val;
  642. val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
  643. if (reset)
  644. val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
  645. else
  646. val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
  647. vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
  648. if (crtc->config->lane_count > 2) {
  649. val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
  650. if (reset)
  651. val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
  652. else
  653. val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
  654. vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
  655. }
  656. val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
  657. val |= CHV_PCS_REQ_SOFTRESET_EN;
  658. if (reset)
  659. val &= ~DPIO_PCS_CLK_SOFT_RESET;
  660. else
  661. val |= DPIO_PCS_CLK_SOFT_RESET;
  662. vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
  663. if (crtc->config->lane_count > 2) {
  664. val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
  665. val |= CHV_PCS_REQ_SOFTRESET_EN;
  666. if (reset)
  667. val &= ~DPIO_PCS_CLK_SOFT_RESET;
  668. else
  669. val |= DPIO_PCS_CLK_SOFT_RESET;
  670. vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
  671. }
  672. }
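
/*
 * Prepare the CHV PHY before the PLL is enabled: apply the lane powergating
 * overrides for the unused lanes (and, for the port B + pipe B case, force
 * the second common lane on so the PLL is accessible), assert the data lane
 * reset, and program the left/right clock distribution and clock channel
 * usage needed for the crossed pipe/port combinations.
 */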
  673. void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
  674. {
  675. struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  676. struct drm_device *dev = encoder->base.dev;
  677. struct drm_i915_private *dev_priv = to_i915(dev);
  678. struct intel_crtc *intel_crtc =
  679. to_intel_crtc(encoder->base.crtc);
  680. enum dpio_channel ch = vlv_dport_to_channel(dport);
  681. enum pipe pipe = intel_crtc->pipe;
  682. unsigned int lane_mask =
  683. intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
  684. u32 val;
  685. /*
  686. * Must trick the second common lane into life.
  687. * Otherwise we can't even access the PLL.
  688. */
  689. if (ch == DPIO_CH0 && pipe == PIPE_B)
  690. dport->release_cl2_override =
  691. !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
  692. chv_phy_powergate_lanes(encoder, true, lane_mask);
  693. mutex_lock(&dev_priv->sb_lock);
  694. /* Assert data lane reset */
  695. chv_data_lane_soft_reset(encoder, true);
  696. /* program left/right clock distribution */
  697. if (pipe != PIPE_B) {
  698. val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
  699. val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
  700. if (ch == DPIO_CH0)
  701. val |= CHV_BUFLEFTENA1_FORCE;
  702. if (ch == DPIO_CH1)
  703. val |= CHV_BUFRIGHTENA1_FORCE;
  704. vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
  705. } else {
  706. val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
  707. val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
  708. if (ch == DPIO_CH0)
  709. val |= CHV_BUFLEFTENA2_FORCE;
  710. if (ch == DPIO_CH1)
  711. val |= CHV_BUFRIGHTENA2_FORCE;
  712. vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
  713. }
  714. /* program clock channel usage */
  715. val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
  716. val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
  717. if (pipe != PIPE_B)
  718. val &= ~CHV_PCS_USEDCLKCHANNEL;
  719. else
  720. val |= CHV_PCS_USEDCLKCHANNEL;
  721. vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
  722. if (intel_crtc->config->lane_count > 2) {
  723. val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
  724. val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
  725. if (pipe != PIPE_B)
  726. val &= ~CHV_PCS_USEDCLKCHANNEL;
  727. else
  728. val |= CHV_PCS_USEDCLKCHANNEL;
  729. vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
  730. }
  731. /*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
  735. */
  736. val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
  737. if (pipe != PIPE_B)
  738. val &= ~CHV_CMN_USEDCLKCHANNEL;
  739. else
  740. val |= CHV_CMN_USEDCLKCHANNEL;
  741. vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
  742. mutex_unlock(&dev_priv->sb_lock);
  743. }
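
/*
 * Final CHV PHY setup before the encoder is enabled: let the hardware manage
 * the TX FIFO reset source, program the per-lane UPAR bits and the data lane
 * stagger based on the port clock, then deassert the data lane reset.
 */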
  744. void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
  745. {
  746. struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  747. struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
  748. struct drm_device *dev = encoder->base.dev;
  749. struct drm_i915_private *dev_priv = to_i915(dev);
  750. struct intel_crtc *intel_crtc =
  751. to_intel_crtc(encoder->base.crtc);
  752. enum dpio_channel ch = vlv_dport_to_channel(dport);
  753. int pipe = intel_crtc->pipe;
  754. int data, i, stagger;
  755. u32 val;
  756. mutex_lock(&dev_priv->sb_lock);
  757. /* allow hardware to manage TX FIFO reset source */
  758. val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
  759. val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
  760. vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
  761. if (intel_crtc->config->lane_count > 2) {
  762. val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
  763. val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
  764. vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
  765. }
	/* Program Tx lane latency optimal setting */
  767. for (i = 0; i < intel_crtc->config->lane_count; i++) {
  768. /* Set the upar bit */
  769. if (intel_crtc->config->lane_count == 1)
  770. data = 0x0;
  771. else
  772. data = (i == 1) ? 0x0 : 0x1;
  773. vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
  774. data << DPIO_UPAR_SHIFT);
  775. }
  776. /* Data lane stagger programming */
  777. if (intel_crtc->config->port_clock > 270000)
  778. stagger = 0x18;
  779. else if (intel_crtc->config->port_clock > 135000)
  780. stagger = 0xd;
  781. else if (intel_crtc->config->port_clock > 67500)
  782. stagger = 0x7;
  783. else if (intel_crtc->config->port_clock > 33750)
  784. stagger = 0x4;
  785. else
  786. stagger = 0x2;
  787. val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
  788. val |= DPIO_TX2_STAGGER_MASK(0x1f);
  789. vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
  790. if (intel_crtc->config->lane_count > 2) {
  791. val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
  792. val |= DPIO_TX2_STAGGER_MASK(0x1f);
  793. vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
  794. }
  795. vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
  796. DPIO_LANESTAGGER_STRAP(stagger) |
  797. DPIO_LANESTAGGER_STRAP_OVRD |
  798. DPIO_TX1_STAGGER_MASK(0x1f) |
  799. DPIO_TX1_STAGGER_MULT(6) |
  800. DPIO_TX2_STAGGER_MULT(0));
  801. if (intel_crtc->config->lane_count > 2) {
  802. vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
  803. DPIO_LANESTAGGER_STRAP(stagger) |
  804. DPIO_LANESTAGGER_STRAP_OVRD |
  805. DPIO_TX1_STAGGER_MASK(0x1f) |
  806. DPIO_TX1_STAGGER_MULT(7) |
  807. DPIO_TX2_STAGGER_MULT(5));
  808. }
  809. /* Deassert data lane reset */
  810. chv_data_lane_soft_reset(encoder, false);
  811. mutex_unlock(&dev_priv->sb_lock);
  812. }
  813. void chv_phy_release_cl2_override(struct intel_encoder *encoder)
  814. {
  815. struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  816. struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
  817. if (dport->release_cl2_override) {
  818. chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
  819. dport->release_cl2_override = false;
  820. }
  821. }
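
/*
 * Tear down the left/right clock distribution overrides once the PLL has
 * been disabled, and drop the lane powergating overrides (the comment at the
 * end of the function explains the lane power-down handling).
 */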
  822. void chv_phy_post_pll_disable(struct intel_encoder *encoder)
  823. {
  824. struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
  825. enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
  826. u32 val;
  827. mutex_lock(&dev_priv->sb_lock);
  828. /* disable left/right clock distribution */
  829. if (pipe != PIPE_B) {
  830. val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
  831. val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
  832. vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
  833. } else {
  834. val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
  835. val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
  836. vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
  837. }
  838. mutex_unlock(&dev_priv->sb_lock);
  839. /*
  840. * Leave the power down bit cleared for at least one
  841. * lane so that chv_powergate_phy_ch() will power
  842. * on something when the channel is otherwise unused.
  843. * When the port is off and the override is removed
  844. * the lanes power down anyway, so otherwise it doesn't
  845. * really matter what the state of power down bits is
  846. * after this.
  847. */
  848. chv_phy_powergate_lanes(encoder, false, 0x0);
  849. }
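
/*
 * VLV signal level programming: write the demph, pre-emphasis and unique
 * transition scale values into the TX/PCS registers and then kick off
 * OCALINIT on the TX lanes.
 */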
  850. void vlv_set_phy_signal_level(struct intel_encoder *encoder,
  851. u32 demph_reg_value, u32 preemph_reg_value,
  852. u32 uniqtranscale_reg_value, u32 tx3_demph)
  853. {
  854. struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
  855. struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
  856. struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  857. enum dpio_channel port = vlv_dport_to_channel(dport);
  858. int pipe = intel_crtc->pipe;
  859. mutex_lock(&dev_priv->sb_lock);
  860. vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
  861. vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
  862. vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
  863. uniqtranscale_reg_value);
  864. vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
  865. if (tx3_demph)
  866. vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
  867. vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
  868. vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
  869. vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
  870. mutex_unlock(&dev_priv->sb_lock);
  871. }
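
/*
 * Program the TX lane resets and PCS clock defaults and apply the inter-pair
 * skew fixup before the PLL is enabled.
 */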
  872. void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
  873. {
  874. struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  875. struct drm_device *dev = encoder->base.dev;
  876. struct drm_i915_private *dev_priv = to_i915(dev);
  877. struct intel_crtc *intel_crtc =
  878. to_intel_crtc(encoder->base.crtc);
  879. enum dpio_channel port = vlv_dport_to_channel(dport);
  880. int pipe = intel_crtc->pipe;
  881. /* Program Tx lane resets to default */
  882. mutex_lock(&dev_priv->sb_lock);
  883. vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
  884. DPIO_PCS_TX_LANE2_RESET |
  885. DPIO_PCS_TX_LANE1_RESET);
  886. vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
  887. DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
  888. DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
  889. (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
  890. DPIO_PCS_CLK_SOFT_RESET);
  891. /* Fix up inter-pair skew failure */
  892. vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
  893. vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
  894. vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
  895. mutex_unlock(&dev_priv->sb_lock);
  896. }
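
/* Enable the clock channels for this port and program the lane clock. */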
  897. void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
  898. {
  899. struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  900. struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
  901. struct drm_device *dev = encoder->base.dev;
  902. struct drm_i915_private *dev_priv = to_i915(dev);
  903. struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
  904. enum dpio_channel port = vlv_dport_to_channel(dport);
  905. int pipe = intel_crtc->pipe;
  906. u32 val;
  907. mutex_lock(&dev_priv->sb_lock);
  908. /* Enable clock channels for this port */
  909. val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
  910. val = 0;
  911. if (pipe)
  912. val |= (1<<21);
  913. else
  914. val &= ~(1<<21);
  915. val |= 0x001000c4;
  916. vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
  917. /* Program lane clock */
  918. vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
  919. vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
  920. mutex_unlock(&dev_priv->sb_lock);
  921. }
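
/* Put the TX lanes for this port back into reset. */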
  922. void vlv_phy_reset_lanes(struct intel_encoder *encoder)
  923. {
  924. struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  925. struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
  926. struct intel_crtc *intel_crtc =
  927. to_intel_crtc(encoder->base.crtc);
  928. enum dpio_channel port = vlv_dport_to_channel(dport);
  929. int pipe = intel_crtc->pipe;
  930. mutex_lock(&dev_priv->sb_lock);
  931. vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
  932. vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
  933. mutex_unlock(&dev_priv->sb_lock);
  934. }