intel_dpll_mgr.c 55 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126
  1. /*
  2. * Copyright © 2006-2016 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21. * DEALINGS IN THE SOFTWARE.
  22. */
  23. #include "intel_drv.h"
  24. /**
  25. * DOC: Display PLLs
  26. *
  27. * Display PLLs used for driving outputs vary by platform. While some have
  28. * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
  29. * from a pool. In the latter scenario, it is possible that multiple pipes
  30. * share a PLL if their configurations match.
  31. *
  32. * This file provides an abstraction over display PLLs. The function
  33. * intel_shared_dpll_init() initializes the PLLs for the given platform. The
  34. * users of a PLL are tracked and that tracking is integrated with the atomic
  35. * modest interface. During an atomic operation, a PLL can be requested for a
  36. * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
  37. * a previously used PLL can be released with intel_release_shared_dpll().
  38. * Changes to the users are first staged in the atomic state, and then made
  39. * effective by calling intel_shared_dpll_swap_state() during the atomic
  40. * commit phase.
  41. */
  42. struct intel_shared_dpll *
  43. skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
  44. {
  45. struct intel_shared_dpll *pll = NULL;
  46. struct intel_dpll_hw_state dpll_hw_state;
  47. enum intel_dpll_id i;
  48. bool found = false;
  49. if (!skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
  50. return pll;
  51. for (i = DPLL_ID_SKL_DPLL1; i <= DPLL_ID_SKL_DPLL3; i++) {
  52. pll = &dev_priv->shared_dplls[i];
  53. /* Only want to check enabled timings first */
  54. if (pll->state.crtc_mask == 0)
  55. continue;
  56. if (memcmp(&dpll_hw_state, &pll->state.hw_state,
  57. sizeof(pll->state.hw_state)) == 0) {
  58. found = true;
  59. break;
  60. }
  61. }
  62. /* Ok no matching timings, maybe there's a free one? */
  63. for (i = DPLL_ID_SKL_DPLL1;
  64. ((found == false) && (i <= DPLL_ID_SKL_DPLL3)); i++) {
  65. pll = &dev_priv->shared_dplls[i];
  66. if (pll->state.crtc_mask == 0) {
  67. pll->state.hw_state = dpll_hw_state;
  68. break;
  69. }
  70. }
  71. return pll;
  72. }
  73. static void
  74. intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
  75. struct intel_shared_dpll_state *shared_dpll)
  76. {
  77. enum intel_dpll_id i;
  78. /* Copy shared dpll state */
  79. for (i = 0; i < dev_priv->num_shared_dpll; i++) {
  80. struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
  81. shared_dpll[i] = pll->state;
  82. }
  83. }
  84. static struct intel_shared_dpll_state *
  85. intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
  86. {
  87. struct intel_atomic_state *state = to_intel_atomic_state(s);
  88. WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
  89. if (!state->dpll_set) {
  90. state->dpll_set = true;
  91. intel_atomic_duplicate_dpll_state(to_i915(s->dev),
  92. state->shared_dpll);
  93. }
  94. return state->shared_dpll;
  95. }
  96. /**
  97. * intel_get_shared_dpll_by_id - get a DPLL given its id
  98. * @dev_priv: i915 device instance
  99. * @id: pll id
  100. *
  101. * Returns:
  102. * A pointer to the DPLL with @id
  103. */
  104. struct intel_shared_dpll *
  105. intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
  106. enum intel_dpll_id id)
  107. {
  108. return &dev_priv->shared_dplls[id];
  109. }
  110. /**
  111. * intel_get_shared_dpll_id - get the id of a DPLL
  112. * @dev_priv: i915 device instance
  113. * @pll: the DPLL
  114. *
  115. * Returns:
  116. * The id of @pll
  117. */
  118. enum intel_dpll_id
  119. intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
  120. struct intel_shared_dpll *pll)
  121. {
  122. if (WARN_ON(pll < dev_priv->shared_dplls||
  123. pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
  124. return -1;
  125. return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
  126. }
/* For ILK+ */
/**
 * assert_shared_dpll - warn if a DPLL's hardware state differs from @state
 * @dev_priv: i915 device instance
 * @pll: the DPLL to check; a NULL @pll itself triggers a warning
 * @state: expected on/off state
 *
 * Reads the current hardware state through the PLL's get_hw_state() hook
 * and emits an I915_STATE_WARN if it does not match the expectation.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;

	cur_state = pll->funcs.get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			pll->name, onoff(state), onoff(cur_state));
}
  141. /**
  142. * intel_prepare_shared_dpll - call a dpll's prepare hook
  143. * @crtc: CRTC which has a shared dpll
  144. *
  145. * This calls the PLL's prepare hook if it has one and if the PLL is not
  146. * already enabled. The prepare hook is platform specific.
  147. */
  148. void intel_prepare_shared_dpll(struct intel_crtc *crtc)
  149. {
  150. struct drm_device *dev = crtc->base.dev;
  151. struct drm_i915_private *dev_priv = to_i915(dev);
  152. struct intel_shared_dpll *pll = crtc->config->shared_dpll;
  153. if (WARN_ON(pll == NULL))
  154. return;
  155. mutex_lock(&dev_priv->dpll_lock);
  156. WARN_ON(!pll->state.crtc_mask);
  157. if (!pll->active_mask) {
  158. DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
  159. WARN_ON(pll->on);
  160. assert_shared_dpll_disabled(dev_priv, pll);
  161. pll->funcs.prepare(dev_priv, pll);
  162. }
  163. mutex_unlock(&dev_priv->dpll_lock);
  164. }
/**
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 * @crtc: CRTC which has a shared DPLL
 *
 * Enable the shared DPLL used by @crtc. If another CRTC already enabled
 * the PLL, only this CRTC's bit is added to the active mask; the hardware
 * enable hook runs only for the first user.
 */
void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
	unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
	unsigned old_mask;

	if (WARN_ON(pll == NULL))
		return;

	mutex_lock(&dev_priv->dpll_lock);
	old_mask = pll->active_mask;

	/* The CRTC must be a registered user that is not yet active. */
	if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
	    WARN_ON(pll->active_mask & crtc_mask))
		goto out;

	pll->active_mask |= crtc_mask;

	DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
		      pll->name, pll->active_mask, pll->on,
		      crtc->base.base.id);

	/* Already enabled by another CRTC: just sanity-check and return. */
	if (old_mask) {
		WARN_ON(!pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		goto out;
	}
	WARN_ON(pll->on);

	/* First user: actually turn the PLL on. */
	DRM_DEBUG_KMS("enabling %s\n", pll->name);
	pll->funcs.enable(dev_priv, pll);
	pll->on = true;

out:
	mutex_unlock(&dev_priv->dpll_lock);
}
/**
 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 * @crtc: CRTC which has a shared DPLL
 *
 * Disable the shared DPLL used by @crtc. The hardware disable hook only
 * runs once the last active CRTC drops off the PLL's active mask.
 */
void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
	unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);

	/* PCH only available on ILK+ */
	if (INTEL_GEN(dev_priv) < 5)
		return;

	if (pll == NULL)
		return;

	mutex_lock(&dev_priv->dpll_lock);
	if (WARN_ON(!(pll->active_mask & crtc_mask)))
		goto out;

	DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
		      pll->name, pll->active_mask, pll->on,
		      crtc->base.base.id);

	assert_shared_dpll_enabled(dev_priv, pll);
	WARN_ON(!pll->on);

	/* Other CRTCs still using the PLL: keep the hardware running. */
	pll->active_mask &= ~crtc_mask;
	if (pll->active_mask)
		goto out;

	DRM_DEBUG_KMS("disabling %s\n", pll->name);
	pll->funcs.disable(dev_priv, pll);
	pll->on = false;

out:
	mutex_unlock(&dev_priv->dpll_lock);
}
  234. static struct intel_shared_dpll *
  235. intel_find_shared_dpll(struct intel_crtc *crtc,
  236. struct intel_crtc_state *crtc_state,
  237. enum intel_dpll_id range_min,
  238. enum intel_dpll_id range_max)
  239. {
  240. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  241. struct intel_shared_dpll *pll;
  242. struct intel_shared_dpll_state *shared_dpll;
  243. enum intel_dpll_id i;
  244. shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
  245. for (i = range_min; i <= range_max; i++) {
  246. pll = &dev_priv->shared_dplls[i];
  247. /* Only want to check enabled timings first */
  248. if (shared_dpll[i].crtc_mask == 0)
  249. continue;
  250. if (memcmp(&crtc_state->dpll_hw_state,
  251. &shared_dpll[i].hw_state,
  252. sizeof(crtc_state->dpll_hw_state)) == 0) {
  253. DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
  254. crtc->base.base.id, crtc->base.name, pll->name,
  255. shared_dpll[i].crtc_mask,
  256. pll->active_mask);
  257. return pll;
  258. }
  259. }
  260. /* Ok no matching timings, maybe there's a free one? */
  261. for (i = range_min; i <= range_max; i++) {
  262. pll = &dev_priv->shared_dplls[i];
  263. if (shared_dpll[i].crtc_mask == 0) {
  264. DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
  265. crtc->base.base.id, crtc->base.name, pll->name);
  266. return pll;
  267. }
  268. }
  269. return NULL;
  270. }
  271. static void
  272. intel_reference_shared_dpll(struct intel_shared_dpll *pll,
  273. struct intel_crtc_state *crtc_state)
  274. {
  275. struct intel_shared_dpll_state *shared_dpll;
  276. struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  277. enum intel_dpll_id i = pll->id;
  278. shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
  279. if (shared_dpll[i].crtc_mask == 0)
  280. shared_dpll[i].hw_state =
  281. crtc_state->dpll_hw_state;
  282. crtc_state->shared_dpll = pll;
  283. DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
  284. pipe_name(crtc->pipe));
  285. shared_dpll[pll->id].crtc_mask |= 1 << crtc->pipe;
  286. }
  287. /**
  288. * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
  289. * @state: atomic state
  290. *
  291. * This is the dpll version of drm_atomic_helper_swap_state() since the
  292. * helper does not handle driver-specific global state.
  293. *
  294. * For consistency with atomic helpers this function does a complete swap,
  295. * i.e. it also puts the current state into @state, even though there is no
  296. * need for that at this moment.
  297. */
  298. void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
  299. {
  300. struct drm_i915_private *dev_priv = to_i915(state->dev);
  301. struct intel_shared_dpll_state *shared_dpll;
  302. struct intel_shared_dpll *pll;
  303. enum intel_dpll_id i;
  304. if (!to_intel_atomic_state(state)->dpll_set)
  305. return;
  306. shared_dpll = to_intel_atomic_state(state)->shared_dpll;
  307. for (i = 0; i < dev_priv->num_shared_dpll; i++) {
  308. struct intel_shared_dpll_state tmp;
  309. pll = &dev_priv->shared_dplls[i];
  310. tmp = pll->state;
  311. pll->state = shared_dpll[i];
  312. shared_dpll[i] = tmp;
  313. }
  314. }
/*
 * Read back the PCH DPLL's hardware state (DPLL, FP0, FP1 registers).
 * Returns true when the PLL's VCO enable bit is set, false when the PLL
 * is off or its power domain is not enabled.
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;

	/* Hardware state is unreadable without the PLL power domain. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	val = I915_READ(PCH_DPLL(pll->id));
	hw_state->dpll = val;
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return val & DPLL_VCO_ENABLE;
}
/* Program the cached FP0/FP1 divisor values before the PLL is enabled. */
static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	I915_WRITE(PCH_FP0(pll->id), pll->state.hw_state.fp0);
	I915_WRITE(PCH_FP1(pll->id), pll->state.hw_state.fp1);
}
/*
 * Warn if the PCH reference clock is not running; the PCH DPLLs depend
 * on it. Only meaningful on IBX/CPT PCHs.
 */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	val = I915_READ(PCH_DREF_CONTROL);
	/* Any of the SSC/nonspread/superspread source bits counts as on. */
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
/*
 * Enable a PCH DPLL by writing its cached control value. The value is
 * written twice with delays in between; see the comment below for why.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
/*
 * Disable a PCH DPLL after checking that no PCH transcoder still uses
 * it, then clear its control register.
 */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	/* Make sure no transcoder is still depending on us. */
	for_each_intel_crtc(dev, crtc) {
		if (crtc->config->shared_dpll == pll)
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
	}

	I915_WRITE(PCH_DPLL(pll->id), 0);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
  377. static struct intel_shared_dpll *
  378. ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
  379. struct intel_encoder *encoder)
  380. {
  381. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  382. struct intel_shared_dpll *pll;
  383. enum intel_dpll_id i;
  384. if (HAS_PCH_IBX(dev_priv)) {
  385. /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
  386. i = (enum intel_dpll_id) crtc->pipe;
  387. pll = &dev_priv->shared_dplls[i];
  388. DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
  389. crtc->base.base.id, crtc->base.name, pll->name);
  390. } else {
  391. pll = intel_find_shared_dpll(crtc, crtc_state,
  392. DPLL_ID_PCH_PLL_A,
  393. DPLL_ID_PCH_PLL_B);
  394. }
  395. if (!pll)
  396. return NULL;
  397. /* reference the pll */
  398. intel_reference_shared_dpll(pll, crtc_state);
  399. return pll;
  400. }
/* Dump an IBX/CPT DPLL hw state to the kernel debug log. */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state)
{
	DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		      "fp0: 0x%x, fp1: 0x%x\n",
		      hw_state->dpll,
		      hw_state->dpll_md,
		      hw_state->fp0,
		      hw_state->fp1);
}
/* PLL hooks for the IBX/CPT PCH DPLLs. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.prepare = ibx_pch_dpll_prepare,
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
/* Enable a HSW WRPLL by writing its cached control value. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	I915_WRITE(WRPLL_CTL(pll->id), pll->state.hw_state.wrpll);
	POSTING_READ(WRPLL_CTL(pll->id));
	/* 20 us delay after enabling — presumably PLL settle time; see bspec */
	udelay(20);
}
/* Enable the HSW SPLL by writing its cached control value. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
	POSTING_READ(SPLL_CTL);
	/* 20 us delay after enabling — presumably PLL settle time; see bspec */
	udelay(20);
}
  431. static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
  432. struct intel_shared_dpll *pll)
  433. {
  434. uint32_t val;
  435. val = I915_READ(WRPLL_CTL(pll->id));
  436. I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
  437. POSTING_READ(WRPLL_CTL(pll->id));
  438. }
  439. static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
  440. struct intel_shared_dpll *pll)
  441. {
  442. uint32_t val;
  443. val = I915_READ(SPLL_CTL);
  444. I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
  445. POSTING_READ(SPLL_CTL);
  446. }
/*
 * Read back a WRPLL's control register. Returns true when its enable bit
 * is set, false when the PLL is off or its power domain is not enabled.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;

	/* Hardware state is unreadable without the PLL power domain. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	val = I915_READ(WRPLL_CTL(pll->id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return val & WRPLL_PLL_ENABLE;
}
/*
 * Read back the SPLL's control register. Returns true when its enable
 * bit is set, false when the PLL is off or its power domain is not
 * enabled.
 */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;

	/* Hardware state is unreadable without the PLL power domain. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	val = I915_READ(SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return val & SPLL_PLL_ENABLE;
}
/* LC PLL reference frequency, in MHz. */
#define LC_FREQ 2700
/* LC frequency scaled for kHz*2 math used in hsw_wrpll_update_rnp(). */
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post divider (P) search range; even values only. */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
/* Reference input (MHz) and VCO (MHz) bounds for the WR PLL. */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* Candidate divider triple: P, N2 (= 2*N), R2 (= 2*R). */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
  484. static unsigned hsw_wrpll_get_budget_for_freq(int clock)
  485. {
  486. unsigned budget;
  487. switch (clock) {
  488. case 25175000:
  489. case 25200000:
  490. case 27000000:
  491. case 27027000:
  492. case 37762500:
  493. case 37800000:
  494. case 40500000:
  495. case 40541000:
  496. case 54000000:
  497. case 54054000:
  498. case 59341000:
  499. case 59400000:
  500. case 72000000:
  501. case 74176000:
  502. case 74250000:
  503. case 81000000:
  504. case 81081000:
  505. case 89012000:
  506. case 89100000:
  507. case 108000000:
  508. case 108108000:
  509. case 111264000:
  510. case 111375000:
  511. case 148352000:
  512. case 148500000:
  513. case 162000000:
  514. case 162162000:
  515. case 222525000:
  516. case 222750000:
  517. case 296703000:
  518. case 297000000:
  519. budget = 0;
  520. break;
  521. case 233500000:
  522. case 245250000:
  523. case 247750000:
  524. case 253250000:
  525. case 298000000:
  526. budget = 1500;
  527. break;
  528. case 169128000:
  529. case 169500000:
  530. case 179500000:
  531. case 202000000:
  532. budget = 2000;
  533. break;
  534. case 256250000:
  535. case 262500000:
  536. case 270000000:
  537. case 272500000:
  538. case 273750000:
  539. case 280750000:
  540. case 281250000:
  541. case 286000000:
  542. case 291750000:
  543. budget = 4000;
  544. break;
  545. case 267250000:
  546. case 268500000:
  547. budget = 5000;
  548. break;
  549. default:
  550. budget = 1000;
  551. break;
  552. }
  553. return budget;
  554. }
/*
 * Compare the candidate divider triple (r2, n2, p) against the current
 * best for target frequency @freq2k (in kHz*2) under the ppm @budget,
 * updating @best in place when the candidate wins. See the long comment
 * below for the selection criteria.
 */
static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
				 unsigned r2, unsigned n2, unsigned p,
				 struct hsw_wrpll_rnp *best)
{
	uint64_t a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution. However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/* Cross-multiplied forms of the budget/delta comparison above. */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it? Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
/*
 * Exhaustively search the (r2, n2, p) divider space for the WR PLL
 * configuration closest to @clock (in Hz), writing the winners to
 * @r2_out, @n2_out and @p_out. The r2/n2 loop bounds are derived from
 * the REF_* and VCO_* constraints; see the comments below.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	uint64_t freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = { 0, 0, 0 };
	unsigned budget;

	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
  667. static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
  668. struct intel_crtc *crtc,
  669. struct intel_crtc_state *crtc_state)
  670. {
  671. struct intel_shared_dpll *pll;
  672. uint32_t val;
  673. unsigned int p, n2, r2;
  674. hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
  675. val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
  676. WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
  677. WRPLL_DIVIDER_POST(p);
  678. crtc_state->dpll_hw_state.wrpll = val;
  679. pll = intel_find_shared_dpll(crtc, crtc_state,
  680. DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
  681. if (!pll)
  682. return NULL;
  683. return pll;
  684. }
  685. struct intel_shared_dpll *hsw_ddi_dp_get_dpll(struct intel_encoder *encoder,
  686. int clock)
  687. {
  688. struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
  689. struct intel_shared_dpll *pll;
  690. enum intel_dpll_id pll_id;
  691. switch (clock / 2) {
  692. case 81000:
  693. pll_id = DPLL_ID_LCPLL_810;
  694. break;
  695. case 135000:
  696. pll_id = DPLL_ID_LCPLL_1350;
  697. break;
  698. case 270000:
  699. pll_id = DPLL_ID_LCPLL_2700;
  700. break;
  701. default:
  702. DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
  703. return NULL;
  704. }
  705. pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
  706. if (!pll)
  707. return NULL;
  708. return pll;
  709. }
  710. static struct intel_shared_dpll *
  711. hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
  712. struct intel_encoder *encoder)
  713. {
  714. struct intel_shared_dpll *pll;
  715. int clock = crtc_state->port_clock;
  716. memset(&crtc_state->dpll_hw_state, 0,
  717. sizeof(crtc_state->dpll_hw_state));
  718. if (encoder->type == INTEL_OUTPUT_HDMI) {
  719. pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);
  720. } else if (encoder->type == INTEL_OUTPUT_DP ||
  721. encoder->type == INTEL_OUTPUT_DP_MST ||
  722. encoder->type == INTEL_OUTPUT_EDP) {
  723. pll = hsw_ddi_dp_get_dpll(encoder, clock);
  724. } else if (encoder->type == INTEL_OUTPUT_ANALOG) {
  725. if (WARN_ON(crtc_state->port_clock / 2 != 135000))
  726. return NULL;
  727. crtc_state->dpll_hw_state.spll =
  728. SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
  729. pll = intel_find_shared_dpll(crtc, crtc_state,
  730. DPLL_ID_SPLL, DPLL_ID_SPLL);
  731. } else {
  732. return NULL;
  733. }
  734. if (!pll)
  735. return NULL;
  736. intel_reference_shared_dpll(pll, crtc_state);
  737. return pll;
  738. }
  739. static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
  740. struct intel_dpll_hw_state *hw_state)
  741. {
  742. DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
  743. hw_state->wrpll, hw_state->spll);
  744. }
/* HSW/BDW WRPLL1/WRPLL2 hooks. */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
};
/* HSW/BDW SPLL hooks. */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
};
/*
 * The LCPLLs are fixed-frequency PLLs managed elsewhere (enabled by the
 * BIOS, see intel_ddi_pll_init()), so enabling is deliberately a no-op.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}
/* LCPLL disable is likewise intentionally a no-op; see the enable hook. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
/*
 * The LCPLL is reported as always enabled; @hw_state is left untouched
 * since there is no per-port configuration to read back for it.
 */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
/* HSW/BDW fixed-frequency LCPLL hooks (all effectively no-ops). */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
};
/* Per-DPLL register addresses on SKL: control plus the two config regs. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode, so cfgcr1/2 stay 0 */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
/*
 * Update this PLL's 6-bit field in the shared DPLL_CTRL1 register from the
 * cached hw state, leaving the other PLLs' fields untouched.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	uint32_t val;

	/* Read-modify-write: clear only this PLL's mode/SSC/link-rate bits. */
	val = I915_READ(DPLL_CTRL1);

	val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) |
		 DPLL_CTRL1_LINK_RATE_MASK(pll->id));
	/* Each PLL owns a 6-bit slice at bit position id * 6. */
	val |= pll->state.hw_state.ctrl1 << (pll->id * 6);

	I915_WRITE(DPLL_CTRL1, val);
	POSTING_READ(DPLL_CTRL1);
}
/*
 * Enable a SKL shared PLL: program CTRL1 and the per-PLL CFGCR registers,
 * set the enable bit, then wait for the DPLL_STATUS lock bit and complain
 * if it never locks.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	I915_WRITE(regs[pll->id].cfgcr1, pll->state.hw_state.cfgcr1);
	I915_WRITE(regs[pll->id].cfgcr2, pll->state.hw_state.cfgcr2);
	POSTING_READ(regs[pll->id].cfgcr1);
	POSTING_READ(regs[pll->id].cfgcr2);

	/* the enable bit is always bit 31 */
	I915_WRITE(regs[pll->id].ctl,
		   I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);

	if (intel_wait_for_register(dev_priv,
				    DPLL_STATUS,
				    DPLL_LOCK(pll->id),
				    DPLL_LOCK(pll->id),
				    5))
		DRM_ERROR("DPLL %d not locked\n", pll->id);
}
/*
 * DPLL0 is already running (it drives CDCLK), so "enabling" it only means
 * updating its field in DPLL_CTRL1.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
/* Disable a SKL shared PLL by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;

	/* the enable bit is always bit 31 */
	I915_WRITE(regs[pll->id].ctl,
		   I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
	POSTING_READ(regs[pll->id].ctl);
}
/* DPLL0 must stay on (it drives CDCLK), so disable is a no-op. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
/*
 * Read back the hardware state of a SKL shared PLL.
 *
 * Returns false if the PLLS power well is off or the PLL is disabled;
 * otherwise fills @hw_state (ctrl1 always, cfgcr1/2 only in HDMI mode)
 * and returns true.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	bool ret;

	/* Bail if we can't read the registers without powering things up. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	ret = false;

	val = I915_READ(regs[pll->id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	val = I915_READ(DPLL_CTRL1);
	/* Extract this PLL's 6-bit slice of DPLL_CTRL1. */
	hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(pll->id)) {
		hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
		hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return ret;
}
/*
 * Read back DPLL0's hardware state.  DPLL0 never carries HDMI mode, so
 * only its DPLL_CTRL1 slice is captured; finding it disabled is a WARN
 * because it must be on to drive CDCLK.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	bool ret;

	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = I915_READ(regs[pll->id].ctl);
	if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = I915_READ(DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return ret;
}
/* Running best candidate while searching for SKL WRPLL dividers. */
struct skl_wrpll_context {
	uint64_t min_deviation;		/* current minimal deviation */
	uint64_t central_freq;		/* chosen central freq */
	uint64_t dco_freq;		/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
  903. static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
  904. {
  905. memset(ctx, 0, sizeof(*ctx));
  906. ctx->min_deviation = U64_MAX;
  907. }
/* DCO freq must be within +1%/-6% of the DCO central freq */
#define SKL_DCO_MAX_PDEVIATION	100
#define SKL_DCO_MAX_NDEVIATION	600

/*
 * Consider @divider as a candidate: if @dco_freq deviates from
 * @central_freq by less than the current best (and within the allowed
 * positive/negative limits above), record it in @ctx.
 */
static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
				  uint64_t central_freq,
				  uint64_t dco_freq,
				  unsigned int divider)
{
	uint64_t deviation;

	/* deviation in units of 0.01% of the central frequency */
	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
			      central_freq);

	/* positive deviation */
	if (dco_freq >= central_freq) {
		if (deviation < SKL_DCO_MAX_PDEVIATION &&
		    deviation < ctx->min_deviation) {
			ctx->min_deviation = deviation;
			ctx->central_freq = central_freq;
			ctx->dco_freq = dco_freq;
			ctx->p = divider;
		}
	/* negative deviation */
	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
		   deviation < ctx->min_deviation) {
		ctx->min_deviation = deviation;
		ctx->central_freq = central_freq;
		ctx->dco_freq = dco_freq;
		ctx->p = divider;
	}
}
  937. static void skl_wrpll_get_multipliers(unsigned int p,
  938. unsigned int *p0 /* out */,
  939. unsigned int *p1 /* out */,
  940. unsigned int *p2 /* out */)
  941. {
  942. /* even dividers */
  943. if (p % 2 == 0) {
  944. unsigned int half = p / 2;
  945. if (half == 1 || half == 2 || half == 3 || half == 5) {
  946. *p0 = 2;
  947. *p1 = 1;
  948. *p2 = half;
  949. } else if (half % 2 == 0) {
  950. *p0 = 2;
  951. *p1 = half / 2;
  952. *p2 = 2;
  953. } else if (half % 3 == 0) {
  954. *p0 = 3;
  955. *p1 = half / 3;
  956. *p2 = 2;
  957. } else if (half % 7 == 0) {
  958. *p0 = 7;
  959. *p1 = half / 7;
  960. *p2 = 2;
  961. }
  962. } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
  963. *p0 = 3;
  964. *p1 = 1;
  965. *p2 = p / 3;
  966. } else if (p == 5 || p == 7) {
  967. *p0 = p;
  968. *p1 = 1;
  969. *p2 = 1;
  970. } else if (p == 15) {
  971. *p0 = 3;
  972. *p1 = 1;
  973. *p2 = 5;
  974. } else if (p == 21) {
  975. *p0 = 7;
  976. *p1 = 1;
  977. *p2 = 3;
  978. } else if (p == 35) {
  979. *p0 = 7;
  980. *p1 = 1;
  981. *p2 = 5;
  982. }
  983. }
/* Register-level encoding of a SKL WRPLL configuration (CFGCR1/CFGCR2). */
struct skl_wrpll_params {
	uint32_t dco_fraction;	/* fractional part of the DCO multiplier */
	uint32_t dco_integer;	/* integer part of the DCO multiplier */
	uint32_t qdiv_ratio;	/* P1 divider ratio */
	uint32_t qdiv_mode;	/* 0 = qdiv bypassed (ratio 1), 1 = used */
	uint32_t kdiv;		/* encoded P2 divider */
	uint32_t pdiv;		/* encoded P0 divider */
	uint32_t central_freq;	/* encoded DCO central frequency select */
};
/*
 * Translate a chosen (central_freq, p0, p1, p2) divider solution into the
 * register field encodings expected by DPLL_CFGCR1/2.
 *
 * @afe_clock and @central_freq are in Hz; the DCO multiplier is expressed
 * as a 24 MHz-relative integer plus a 15-bit fraction.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      uint64_t afe_clock,
				      uint64_t central_freq,
				      uint32_t p0, uint32_t p1, uint32_t p2)
{
	uint64_t dco_freq;

	/* Encode the central frequency select (9.6/9.0/8.4 GHz). */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	/* Encode P0 (pdiv); only 1/2/3/7 are representable. */
	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* Encode P2 (kdiv); only 5/2/3/1 are representable. */
	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	params->qdiv_ratio = p1;
	/* qdiv is bypassed when the ratio is 1 */
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bsepc
	 */
	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, 24) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
/*
 * Search for WRPLL dividers producing an AFE clock of 5x the pixel clock,
 * trying even dividers first (preferred), then odd ones, against the three
 * possible DCO central frequencies.  Returns false if no divider keeps the
 * DCO within the allowed deviation window.
 */
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			struct skl_wrpll_params *wrpll_params)
{
	uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	uint64_t dco_central_freq[3] = {8400000000ULL,
					9000000000ULL,
					9600000000ULL};
	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					     24, 28, 30, 32, 36, 40, 42, 44,
					     48, 52, 54, 56, 60, 64, 66, 68,
					     70, 72, 76, 78, 80, 84, 88, 90,
					     92, 96, 98 };
	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const int *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx;
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;

	skl_wrpll_context_init(&ctx);

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				uint64_t dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return false;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
				  p0, p1, p2);

	return true;
}
/*
 * Compute and store the SKL PLL register state (ctrl1, cfgcr1/2) for an
 * HDMI pixel clock (in kHz).  Returns false if no valid WRPLL dividers
 * exist for the requested clock.
 */
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state,
				      int clock)
{
	uint32_t ctrl1, cfgcr1, cfgcr2;
	struct skl_wrpll_params wrpll_params = { 0, };

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
		return false;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
	return true;
}
  1147. bool skl_ddi_dp_set_dpll_hw_state(int clock,
  1148. struct intel_dpll_hw_state *dpll_hw_state)
  1149. {
  1150. uint32_t ctrl1;
  1151. /*
  1152. * See comment in intel_dpll_hw_state to understand why we always use 0
  1153. * as the DPLL id in this function.
  1154. */
  1155. ctrl1 = DPLL_CTRL1_OVERRIDE(0);
  1156. switch (clock / 2) {
  1157. case 81000:
  1158. ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
  1159. break;
  1160. case 135000:
  1161. ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
  1162. break;
  1163. case 270000:
  1164. ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
  1165. break;
  1166. /* eDP 1.4 rates */
  1167. case 162000:
  1168. ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
  1169. break;
  1170. case 108000:
  1171. ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
  1172. break;
  1173. case 216000:
  1174. ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
  1175. break;
  1176. }
  1177. dpll_hw_state->ctrl1 = ctrl1;
  1178. return true;
  1179. }
/*
 * Compute the PLL state for the encoder and reserve a shared PLL: eDP is
 * restricted to DPLL0 (shared with CDCLK), everything else may use
 * DPLL1..DPLL3.  Note the HDMI helper writes crtc_state->dpll_hw_state
 * itself, while the DP path fills a local copy first.
 */
static struct intel_shared_dpll *
skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
	     struct intel_encoder *encoder)
{
	struct intel_shared_dpll *pll;
	int clock = crtc_state->port_clock;
	bool bret;
	struct intel_dpll_hw_state dpll_hw_state;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	if (encoder->type == INTEL_OUTPUT_HDMI) {
		bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
		if (!bret) {
			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
			return NULL;
		}
	} else if (encoder->type == INTEL_OUTPUT_DP ||
		   encoder->type == INTEL_OUTPUT_DP_MST ||
		   encoder->type == INTEL_OUTPUT_EDP) {

		bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
		if (!bret) {
			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
			return NULL;
		}
		crtc_state->dpll_hw_state = dpll_hw_state;
	} else {
		return NULL;
	}

	if (encoder->type == INTEL_OUTPUT_EDP)
		pll = intel_find_shared_dpll(crtc, crtc_state,
					     DPLL_ID_SKL_DPLL0,
					     DPLL_ID_SKL_DPLL0);
	else
		pll = intel_find_shared_dpll(crtc, crtc_state,
					     DPLL_ID_SKL_DPLL1,
					     DPLL_ID_SKL_DPLL3);
	if (!pll)
		return NULL;

	intel_reference_shared_dpll(pll, crtc_state);

	return pll;
}
  1220. static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
  1221. struct intel_dpll_hw_state *hw_state)
  1222. {
  1223. DRM_DEBUG_KMS("dpll_hw_state: "
  1224. "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
  1225. hw_state->ctrl1,
  1226. hw_state->cfgcr1,
  1227. hw_state->cfgcr2);
  1228. }
/* SKL DPLL1..DPLL3 hooks. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
};
/* SKL DPLL0 hooks (DPLL0 also drives CDCLK, so it is never turned off). */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
};
/*
 * Program and enable a BXT/GLK port PLL from the cached hw state.
 *
 * The sequence is: select the non-SSC reference, (GLK only) power up the
 * PLL, program the dividers and loop-filter coefficients through the
 * per-channel PHY registers, trigger a recalibration, set the enable bit
 * and wait for lock, then program the lane staggering values.  The exact
 * register order below is a hardware programming sequence - do not reorder.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	uint32_t temp;
	enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			DRM_ERROR("Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);

	temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);

	temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
	/*
	 * NOTE(review): the second EBB_4 write below reuses the same temp
	 * (no fresh read), applying the cached ebb4 bits with the 10-bit
	 * clock bit cleared - presumably intentional; confirm against the
	 * BXT programming sequence.
	 */
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
	POSTING_READ(BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		DRM_ERROR("PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
/*
 * Disable a BXT/GLK port PLL; on GLK additionally power it down and wait
 * for the power state to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
	uint32_t temp;

	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
	POSTING_READ(BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
				PORT_PLL_POWER_STATE), 200))
			DRM_ERROR("Power state not reset for PLL:%d\n", port);
	}
}
/*
 * Read back the hardware state of a BXT/GLK port PLL.
 *
 * Returns false if the PLLS power well is off or the PLL is disabled;
 * otherwise captures each divider/coefficient register, masked down to the
 * bits this driver programs, and returns true.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
	uint32_t val;
	bool ret;
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	ret = false;

	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
				 hw_state->pcsdw12,
				 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return ret;
}
/* bxt clock parameters */
struct bxt_clk_div {
	int clock;		/* link clock in kHz this entry applies to */
	uint32_t p1;		/* post divider 1 */
	uint32_t p2;		/* post divider 2 */
	uint32_t m2_int;	/* integer part of the M2 multiplier */
	uint32_t m2_frac;	/* fractional part of the M2 multiplier */
	bool m2_frac_en;	/* whether the fractional part is used */
	uint32_t n;		/* reference divider */
	int vco;		/* resulting VCO frequency, filled in later */
};
  1423. /* pre-calculated values for DP linkrates */
  1424. static const struct bxt_clk_div bxt_dp_clk_val[] = {
  1425. {162000, 4, 2, 32, 1677722, 1, 1},
  1426. {270000, 4, 1, 27, 0, 0, 1},
  1427. {540000, 2, 1, 27, 0, 0, 1},
  1428. {216000, 3, 2, 32, 1677722, 1, 1},
  1429. {243000, 4, 1, 24, 1258291, 1, 1},
  1430. {324000, 4, 1, 32, 1677722, 1, 1},
  1431. {432000, 3, 1, 32, 1677722, 1, 1}
  1432. };
/*
 * Find PLL dividers for an HDMI clock via the generic bxt divider search
 * and unpack the result into @clk_div.  Returns false if no dividers fit.
 */
static bool
bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
			  struct intel_crtc_state *crtc_state, int clock,
			  struct bxt_clk_div *clk_div)
{
	struct dpll best_clock;

	/* Calculate HDMI div */
	/*
	 * FIXME: tie the following calculation into
	 * i9xx_crtc_compute_clock
	 */
	if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
		DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
				 clock, pipe_name(intel_crtc->pipe));
		return false;
	}

	clk_div->p1 = best_clock.p1;
	clk_div->p2 = best_clock.p2;
	WARN_ON(best_clock.m1 != 2);
	clk_div->n = best_clock.n;
	/* Split m2 into its 10-bit integer and 22-bit fractional parts. */
	clk_div->m2_int = best_clock.m2 >> 22;
	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
	clk_div->m2_frac_en = clk_div->m2_frac != 0;

	clk_div->vco = best_clock.vco;

	return true;
}
  1459. static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
  1460. {
  1461. int i;
  1462. *clk_div = bxt_dp_clk_val[0];
  1463. for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
  1464. if (bxt_dp_clk_val[i].clock == clock) {
  1465. *clk_div = bxt_dp_clk_val[i];
  1466. break;
  1467. }
  1468. }
  1469. clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
  1470. }
/*
 * Translate dividers and VCO into the BXT port-PLL register values.
 *
 * Loop-filter coefficients and the calibration target count are chosen by
 * VCO band (note vco == 5400000 deliberately falls through the two range
 * checks into its own case); the lane staggering value is chosen by link
 * clock.  Returns false for a VCO outside the supported bands.
 */
static bool bxt_ddi_set_dpll_hw_state(int clock,
			  struct bxt_clk_div *clk_div,
			  struct intel_dpll_hw_state *dpll_hw_state)
{
	int vco = clk_div->vco;
	uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
	uint32_t lanestagger;

	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		DRM_ERROR("Invalid VCO\n");
		return false;
	}

	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
  1525. bool bxt_ddi_dp_set_dpll_hw_state(int clock,
  1526. struct intel_dpll_hw_state *dpll_hw_state)
  1527. {
  1528. struct bxt_clk_div clk_div = {0};
  1529. bxt_ddi_dp_pll_dividers(clock, &clk_div);
  1530. return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
  1531. }
  1532. static bool
  1533. bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc,
  1534. struct intel_crtc_state *crtc_state, int clock,
  1535. struct intel_dpll_hw_state *dpll_hw_state)
  1536. {
  1537. struct bxt_clk_div clk_div = { };
  1538. bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div);
  1539. return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
  1540. }
/*
 * Reserve the shared PLL for a BXT encoder.  PLLs are statically assigned
 * 1:1 to ports, so this computes the hw state for the encoder type and
 * then takes a reference on the port's pre-allocated PLL.
 */
static struct intel_shared_dpll *
bxt_get_dpll(struct intel_crtc *crtc,
		struct intel_crtc_state *crtc_state,
		struct intel_encoder *encoder)
{
	struct intel_dpll_hw_state dpll_hw_state = { };
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_digital_port *intel_dig_port;
	struct intel_shared_dpll *pll;
	int i, clock = crtc_state->port_clock;

	if (encoder->type == INTEL_OUTPUT_HDMI &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
					    &dpll_hw_state))
		return NULL;

	if ((encoder->type == INTEL_OUTPUT_DP ||
	     encoder->type == INTEL_OUTPUT_EDP) &&
	    !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
		return NULL;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state = dpll_hw_state;

	/* For MST the PLL is looked up via the primary port of the encoder. */
	if (encoder->type == INTEL_OUTPUT_DP_MST) {
		struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);

		intel_dig_port = intel_mst->primary;
	} else
		intel_dig_port = enc_to_dig_port(&encoder->base);

	/* 1:1 mapping between ports and PLLs */
	i = (enum intel_dpll_id) intel_dig_port->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, i);

	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
		      crtc->base.base.id, crtc->base.name, pll->name);

	intel_reference_shared_dpll(pll, crtc_state);

	return pll;
}
  1575. static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
  1576. struct intel_dpll_hw_state *hw_state)
  1577. {
  1578. DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
  1579. "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
  1580. "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
  1581. hw_state->ebb0,
  1582. hw_state->ebb4,
  1583. hw_state->pll0,
  1584. hw_state->pll1,
  1585. hw_state->pll2,
  1586. hw_state->pll3,
  1587. hw_state->pll6,
  1588. hw_state->pll8,
  1589. hw_state->pll9,
  1590. hw_state->pll10,
  1591. hw_state->pcsdw12);
  1592. }
/* Hardware enable/disable/state-readout hooks shared by all BXT port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
};
  1598. static void intel_ddi_pll_init(struct drm_device *dev)
  1599. {
  1600. struct drm_i915_private *dev_priv = to_i915(dev);
  1601. if (INTEL_GEN(dev_priv) < 9) {
  1602. uint32_t val = I915_READ(LCPLL_CTL);
  1603. /*
  1604. * The LCPLL register should be turned on by the BIOS. For now
  1605. * let's just check its state and print errors in case
  1606. * something is wrong. Don't even try to turn it on.
  1607. */
  1608. if (val & LCPLL_CD_SOURCE_FCLK)
  1609. DRM_ERROR("CDCLK source is not LCPLL\n");
  1610. if (val & LCPLL_PLL_DISABLE)
  1611. DRM_ERROR("LCPLL is disabled\n");
  1612. }
  1613. }
/*
 * struct dpll_info - static description of one shared DPLL.
 * Tables of these are copied into dev_priv->shared_dplls[] by
 * intel_shared_dpll_init().
 */
struct dpll_info {
	/* human-readable name used in debug output; NULL in a table sentinel */
	const char *name;
	/* enum intel_dpll_id value; -1 terminates a table (init loops on id >= 0) */
	const int id;
	/* enable/disable/get_hw_state vtable for this PLL */
	const struct intel_shared_dpll_funcs *funcs;
	/* INTEL_DPLL_* flags, e.g. INTEL_DPLL_ALWAYS_ON for fixed PLLs */
	uint32_t flags;
};
/*
 * struct intel_dpll_mgr - per-platform shared-DPLL manager.
 * Bundles the platform's PLL table with its PLL-selection and
 * debug-dump callbacks; stored in dev_priv->dpll_mgr.
 */
struct intel_dpll_mgr {
	/* table of PLLs for this platform, terminated by a negative id */
	const struct dpll_info *dpll_info;

	/* pick (and reference) a PLL for the given CRTC/encoder combination */
	struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
					      struct intel_crtc_state *crtc_state,
					      struct intel_encoder *encoder);

	/* log the platform-specific contents of a dpll_hw_state */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state);
};
  1628. static const struct dpll_info pch_plls[] = {
  1629. { "PCH DPLL A", DPLL_ID_PCH_PLL_A, &ibx_pch_dpll_funcs, 0 },
  1630. { "PCH DPLL B", DPLL_ID_PCH_PLL_B, &ibx_pch_dpll_funcs, 0 },
  1631. { NULL, -1, NULL, 0 },
  1632. };
/* DPLL manager for IBX/CPT PCH platforms. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dpll = ibx_get_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
  1638. static const struct dpll_info hsw_plls[] = {
  1639. { "WRPLL 1", DPLL_ID_WRPLL1, &hsw_ddi_wrpll_funcs, 0 },
  1640. { "WRPLL 2", DPLL_ID_WRPLL2, &hsw_ddi_wrpll_funcs, 0 },
  1641. { "SPLL", DPLL_ID_SPLL, &hsw_ddi_spll_funcs, 0 },
  1642. { "LCPLL 810", DPLL_ID_LCPLL_810, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
  1643. { "LCPLL 1350", DPLL_ID_LCPLL_1350, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
  1644. { "LCPLL 2700", DPLL_ID_LCPLL_2700, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
  1645. { NULL, -1, NULL, },
  1646. };
/* DPLL manager for HSW/BDW. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dpll = hsw_get_dpll,
	.dump_hw_state = hsw_dump_hw_state,
};
  1652. static const struct dpll_info skl_plls[] = {
  1653. { "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON },
  1654. { "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 },
  1655. { "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 },
  1656. { "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 },
  1657. { NULL, -1, NULL, },
  1658. };
/* DPLL manager for SKL/KBL. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dpll = skl_get_dpll,
	.dump_hw_state = skl_dump_hw_state,
};
  1664. static const struct dpll_info bxt_plls[] = {
  1665. { "PORT PLL A", DPLL_ID_SKL_DPLL0, &bxt_ddi_pll_funcs, 0 },
  1666. { "PORT PLL B", DPLL_ID_SKL_DPLL1, &bxt_ddi_pll_funcs, 0 },
  1667. { "PORT PLL C", DPLL_ID_SKL_DPLL2, &bxt_ddi_pll_funcs, 0 },
  1668. { NULL, -1, NULL, },
  1669. };
/* DPLL manager for BXT. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dpll = bxt_get_dpll,
	.dump_hw_state = bxt_dump_hw_state,
};
/**
 * intel_shared_dpll_init - Initialize shared DPLLs
 * @dev: drm device
 *
 * Initialize shared DPLLs for @dev.
 */
void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_dpll_mgr *dpll_mgr = NULL;
	const struct dpll_info *dpll_info;
	int i;

	/*
	 * Platform checks go from most to least specific; order matters
	 * where a platform could satisfy more than one predicate (e.g.
	 * SKL is matched before the generic HAS_DDI case).
	 */
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		dpll_mgr = &skl_pll_mgr;
	else if (IS_GEN9_LP(dev_priv))
		dpll_mgr = &bxt_pll_mgr;
	else if (HAS_DDI(dev_priv))
		dpll_mgr = &hsw_pll_mgr;
	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		dpll_mgr = &pch_pll_mgr;

	/* Platforms without shared DPLLs simply register none. */
	if (!dpll_mgr) {
		dev_priv->num_shared_dpll = 0;
		return;
	}

	/* Copy the static table into dev_priv; index must equal PLL id. */
	dpll_info = dpll_mgr->dpll_info;

	for (i = 0; dpll_info[i].id >= 0; i++) {
		WARN_ON(i != dpll_info[i].id);

		dev_priv->shared_dplls[i].id = dpll_info[i].id;
		dev_priv->shared_dplls[i].name = dpll_info[i].name;
		dev_priv->shared_dplls[i].funcs = *dpll_info[i].funcs;
		dev_priv->shared_dplls[i].flags = dpll_info[i].flags;
	}

	dev_priv->dpll_mgr = dpll_mgr;
	dev_priv->num_shared_dpll = i;
	mutex_init(&dev_priv->dpll_lock);

	/* The static tables must fit in the fixed-size shared_dplls array. */
	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);

	/* FIXME: Move this to a more suitable place */
	if (HAS_DDI(dev_priv))
		intel_ddi_pll_init(dev);
}
  1715. /**
  1716. * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
  1717. * @crtc: CRTC
  1718. * @crtc_state: atomic state for @crtc
  1719. * @encoder: encoder
  1720. *
  1721. * Find an appropriate DPLL for the given CRTC and encoder combination. A
  1722. * reference from the @crtc to the returned pll is registered in the atomic
  1723. * state. That configuration is made effective by calling
  1724. * intel_shared_dpll_swap_state(). The reference should be released by calling
  1725. * intel_release_shared_dpll().
  1726. *
  1727. * Returns:
  1728. * A shared DPLL to be used by @crtc and @encoder with the given @crtc_state.
  1729. */
  1730. struct intel_shared_dpll *
  1731. intel_get_shared_dpll(struct intel_crtc *crtc,
  1732. struct intel_crtc_state *crtc_state,
  1733. struct intel_encoder *encoder)
  1734. {
  1735. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1736. const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
  1737. if (WARN_ON(!dpll_mgr))
  1738. return NULL;
  1739. return dpll_mgr->get_dpll(crtc, crtc_state, encoder);
  1740. }
  1741. /**
  1742. * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
  1743. * @dpll: dpll in use by @crtc
  1744. * @crtc: crtc
  1745. * @state: atomic state
  1746. *
  1747. * This function releases the reference from @crtc to @dpll from the
  1748. * atomic @state. The new configuration is made effective by calling
  1749. * intel_shared_dpll_swap_state().
  1750. */
  1751. void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
  1752. struct intel_crtc *crtc,
  1753. struct drm_atomic_state *state)
  1754. {
  1755. struct intel_shared_dpll_state *shared_dpll_state;
  1756. shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
  1757. shared_dpll_state[dpll->id].crtc_mask &= ~(1 << crtc->pipe);
  1758. }
/**
 * intel_dpll_dump_hw_state - write hw_state to dmesg
 * @dev_priv: i915 drm device
 * @hw_state: hw state to be written to the log
 *
 * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
 *
 * (Fixed: kernel-doc name previously said "intel_shared_dpll_dump_hw_state",
 * which did not match this function's actual name.)
 */
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state)
{
	if (dev_priv->dpll_mgr) {
		/* Platform-specific dump registered in intel_shared_dpll_init(). */
		dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
	} else {
		/* fallback for platforms that don't use the shared dpll
		 * infrastructure
		 */
		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			      "fp0: 0x%x, fp1: 0x%x\n",
			      hw_state->dpll,
			      hw_state->dpll_md,
			      hw_state->fp0,
			      hw_state->fp1);
	}
}