intel_dpll_mgr.c 64 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510
  1. /*
  2. * Copyright © 2006-2016 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21. * DEALINGS IN THE SOFTWARE.
  22. */
  23. #include "intel_drv.h"
  24. /**
  25. * DOC: Display PLLs
  26. *
  27. * Display PLLs used for driving outputs vary by platform. While some have
  28. * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
  29. * from a pool. In the latter scenario, it is possible that multiple pipes
  30. * share a PLL if their configurations match.
  31. *
  32. * This file provides an abstraction over display PLLs. The function
  33. * intel_shared_dpll_init() initializes the PLLs for the given platform. The
  34. * users of a PLL are tracked and that tracking is integrated with the atomic
  35. * modeset interface. During an atomic operation, a PLL can be requested for a
  36. * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
  37. * a previously used PLL can be released with intel_release_shared_dpll().
  38. * Changes to the users are first staged in the atomic state, and then made
  39. * effective by calling intel_shared_dpll_swap_state() during the atomic
  40. * commit phase.
  41. */
  42. static void
  43. intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
  44. struct intel_shared_dpll_state *shared_dpll)
  45. {
  46. enum intel_dpll_id i;
  47. /* Copy shared dpll state */
  48. for (i = 0; i < dev_priv->num_shared_dpll; i++) {
  49. struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
  50. shared_dpll[i] = pll->state;
  51. }
  52. }
  53. static struct intel_shared_dpll_state *
  54. intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
  55. {
  56. struct intel_atomic_state *state = to_intel_atomic_state(s);
  57. WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
  58. if (!state->dpll_set) {
  59. state->dpll_set = true;
  60. intel_atomic_duplicate_dpll_state(to_i915(s->dev),
  61. state->shared_dpll);
  62. }
  63. return state->shared_dpll;
  64. }
  65. /**
  66. * intel_get_shared_dpll_by_id - get a DPLL given its id
  67. * @dev_priv: i915 device instance
  68. * @id: pll id
  69. *
  70. * Returns:
  71. * A pointer to the DPLL with @id
  72. */
  73. struct intel_shared_dpll *
  74. intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
  75. enum intel_dpll_id id)
  76. {
  77. return &dev_priv->shared_dplls[id];
  78. }
  79. /**
  80. * intel_get_shared_dpll_id - get the id of a DPLL
  81. * @dev_priv: i915 device instance
  82. * @pll: the DPLL
  83. *
  84. * Returns:
  85. * The id of @pll
  86. */
  87. enum intel_dpll_id
  88. intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
  89. struct intel_shared_dpll *pll)
  90. {
  91. if (WARN_ON(pll < dev_priv->shared_dplls||
  92. pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
  93. return -1;
  94. return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
  95. }
  96. /* For ILK+ */
  97. void assert_shared_dpll(struct drm_i915_private *dev_priv,
  98. struct intel_shared_dpll *pll,
  99. bool state)
  100. {
  101. bool cur_state;
  102. struct intel_dpll_hw_state hw_state;
  103. if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
  104. return;
  105. cur_state = pll->funcs.get_hw_state(dev_priv, pll, &hw_state);
  106. I915_STATE_WARN(cur_state != state,
  107. "%s assertion failure (expected %s, current %s)\n",
  108. pll->name, onoff(state), onoff(cur_state));
  109. }
  110. /**
  111. * intel_prepare_shared_dpll - call a dpll's prepare hook
  112. * @crtc: CRTC which has a shared dpll
  113. *
  114. * This calls the PLL's prepare hook if it has one and if the PLL is not
  115. * already enabled. The prepare hook is platform specific.
  116. */
  117. void intel_prepare_shared_dpll(struct intel_crtc *crtc)
  118. {
  119. struct drm_device *dev = crtc->base.dev;
  120. struct drm_i915_private *dev_priv = to_i915(dev);
  121. struct intel_shared_dpll *pll = crtc->config->shared_dpll;
  122. if (WARN_ON(pll == NULL))
  123. return;
  124. mutex_lock(&dev_priv->dpll_lock);
  125. WARN_ON(!pll->state.crtc_mask);
  126. if (!pll->active_mask) {
  127. DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
  128. WARN_ON(pll->on);
  129. assert_shared_dpll_disabled(dev_priv, pll);
  130. pll->funcs.prepare(dev_priv, pll);
  131. }
  132. mutex_unlock(&dev_priv->dpll_lock);
  133. }
  134. /**
  135. * intel_enable_shared_dpll - enable a CRTC's shared DPLL
  136. * @crtc: CRTC which has a shared DPLL
  137. *
  138. * Enable the shared DPLL used by @crtc.
  139. */
  140. void intel_enable_shared_dpll(struct intel_crtc *crtc)
  141. {
  142. struct drm_device *dev = crtc->base.dev;
  143. struct drm_i915_private *dev_priv = to_i915(dev);
  144. struct intel_shared_dpll *pll = crtc->config->shared_dpll;
  145. unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
  146. unsigned old_mask;
  147. if (WARN_ON(pll == NULL))
  148. return;
  149. mutex_lock(&dev_priv->dpll_lock);
  150. old_mask = pll->active_mask;
  151. if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
  152. WARN_ON(pll->active_mask & crtc_mask))
  153. goto out;
  154. pll->active_mask |= crtc_mask;
  155. DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
  156. pll->name, pll->active_mask, pll->on,
  157. crtc->base.base.id);
  158. if (old_mask) {
  159. WARN_ON(!pll->on);
  160. assert_shared_dpll_enabled(dev_priv, pll);
  161. goto out;
  162. }
  163. WARN_ON(pll->on);
  164. DRM_DEBUG_KMS("enabling %s\n", pll->name);
  165. pll->funcs.enable(dev_priv, pll);
  166. pll->on = true;
  167. out:
  168. mutex_unlock(&dev_priv->dpll_lock);
  169. }
  170. /**
  171. * intel_disable_shared_dpll - disable a CRTC's shared DPLL
  172. * @crtc: CRTC which has a shared DPLL
  173. *
  174. * Disable the shared DPLL used by @crtc.
  175. */
  176. void intel_disable_shared_dpll(struct intel_crtc *crtc)
  177. {
  178. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  179. struct intel_shared_dpll *pll = crtc->config->shared_dpll;
  180. unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
  181. /* PCH only available on ILK+ */
  182. if (INTEL_GEN(dev_priv) < 5)
  183. return;
  184. if (pll == NULL)
  185. return;
  186. mutex_lock(&dev_priv->dpll_lock);
  187. if (WARN_ON(!(pll->active_mask & crtc_mask)))
  188. goto out;
  189. DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
  190. pll->name, pll->active_mask, pll->on,
  191. crtc->base.base.id);
  192. assert_shared_dpll_enabled(dev_priv, pll);
  193. WARN_ON(!pll->on);
  194. pll->active_mask &= ~crtc_mask;
  195. if (pll->active_mask)
  196. goto out;
  197. DRM_DEBUG_KMS("disabling %s\n", pll->name);
  198. pll->funcs.disable(dev_priv, pll);
  199. pll->on = false;
  200. out:
  201. mutex_unlock(&dev_priv->dpll_lock);
  202. }
  203. static struct intel_shared_dpll *
  204. intel_find_shared_dpll(struct intel_crtc *crtc,
  205. struct intel_crtc_state *crtc_state,
  206. enum intel_dpll_id range_min,
  207. enum intel_dpll_id range_max)
  208. {
  209. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  210. struct intel_shared_dpll *pll;
  211. struct intel_shared_dpll_state *shared_dpll;
  212. enum intel_dpll_id i;
  213. shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
  214. for (i = range_min; i <= range_max; i++) {
  215. pll = &dev_priv->shared_dplls[i];
  216. /* Only want to check enabled timings first */
  217. if (shared_dpll[i].crtc_mask == 0)
  218. continue;
  219. if (memcmp(&crtc_state->dpll_hw_state,
  220. &shared_dpll[i].hw_state,
  221. sizeof(crtc_state->dpll_hw_state)) == 0) {
  222. DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
  223. crtc->base.base.id, crtc->base.name, pll->name,
  224. shared_dpll[i].crtc_mask,
  225. pll->active_mask);
  226. return pll;
  227. }
  228. }
  229. /* Ok no matching timings, maybe there's a free one? */
  230. for (i = range_min; i <= range_max; i++) {
  231. pll = &dev_priv->shared_dplls[i];
  232. if (shared_dpll[i].crtc_mask == 0) {
  233. DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
  234. crtc->base.base.id, crtc->base.name, pll->name);
  235. return pll;
  236. }
  237. }
  238. return NULL;
  239. }
  240. static void
  241. intel_reference_shared_dpll(struct intel_shared_dpll *pll,
  242. struct intel_crtc_state *crtc_state)
  243. {
  244. struct intel_shared_dpll_state *shared_dpll;
  245. struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  246. enum intel_dpll_id i = pll->id;
  247. shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
  248. if (shared_dpll[i].crtc_mask == 0)
  249. shared_dpll[i].hw_state =
  250. crtc_state->dpll_hw_state;
  251. crtc_state->shared_dpll = pll;
  252. DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
  253. pipe_name(crtc->pipe));
  254. shared_dpll[pll->id].crtc_mask |= 1 << crtc->pipe;
  255. }
  256. /**
  257. * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
  258. * @state: atomic state
  259. *
  260. * This is the dpll version of drm_atomic_helper_swap_state() since the
  261. * helper does not handle driver-specific global state.
  262. *
  263. * For consistency with atomic helpers this function does a complete swap,
  264. * i.e. it also puts the current state into @state, even though there is no
  265. * need for that at this moment.
  266. */
  267. void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
  268. {
  269. struct drm_i915_private *dev_priv = to_i915(state->dev);
  270. struct intel_shared_dpll_state *shared_dpll;
  271. struct intel_shared_dpll *pll;
  272. enum intel_dpll_id i;
  273. if (!to_intel_atomic_state(state)->dpll_set)
  274. return;
  275. shared_dpll = to_intel_atomic_state(state)->shared_dpll;
  276. for (i = 0; i < dev_priv->num_shared_dpll; i++) {
  277. struct intel_shared_dpll_state tmp;
  278. pll = &dev_priv->shared_dplls[i];
  279. tmp = pll->state;
  280. pll->state = shared_dpll[i];
  281. shared_dpll[i] = tmp;
  282. }
  283. }
  284. static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
  285. struct intel_shared_dpll *pll,
  286. struct intel_dpll_hw_state *hw_state)
  287. {
  288. uint32_t val;
  289. if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
  290. return false;
  291. val = I915_READ(PCH_DPLL(pll->id));
  292. hw_state->dpll = val;
  293. hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
  294. hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
  295. intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
  296. return val & DPLL_VCO_ENABLE;
  297. }
  298. static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
  299. struct intel_shared_dpll *pll)
  300. {
  301. I915_WRITE(PCH_FP0(pll->id), pll->state.hw_state.fp0);
  302. I915_WRITE(PCH_FP1(pll->id), pll->state.hw_state.fp1);
  303. }
  304. static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
  305. {
  306. u32 val;
  307. bool enabled;
  308. I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
  309. val = I915_READ(PCH_DREF_CONTROL);
  310. enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
  311. DREF_SUPERSPREAD_SOURCE_MASK));
  312. I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
  313. }
/* Enable a PCH DPLL.  The register write/delay sequence is order-critical. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
  332. static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
  333. struct intel_shared_dpll *pll)
  334. {
  335. struct drm_device *dev = &dev_priv->drm;
  336. struct intel_crtc *crtc;
  337. /* Make sure no transcoder isn't still depending on us. */
  338. for_each_intel_crtc(dev, crtc) {
  339. if (crtc->config->shared_dpll == pll)
  340. assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
  341. }
  342. I915_WRITE(PCH_DPLL(pll->id), 0);
  343. POSTING_READ(PCH_DPLL(pll->id));
  344. udelay(200);
  345. }
  346. static struct intel_shared_dpll *
  347. ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
  348. struct intel_encoder *encoder)
  349. {
  350. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  351. struct intel_shared_dpll *pll;
  352. enum intel_dpll_id i;
  353. if (HAS_PCH_IBX(dev_priv)) {
  354. /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
  355. i = (enum intel_dpll_id) crtc->pipe;
  356. pll = &dev_priv->shared_dplls[i];
  357. DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
  358. crtc->base.base.id, crtc->base.name, pll->name);
  359. } else {
  360. pll = intel_find_shared_dpll(crtc, crtc_state,
  361. DPLL_ID_PCH_PLL_A,
  362. DPLL_ID_PCH_PLL_B);
  363. }
  364. if (!pll)
  365. return NULL;
  366. /* reference the pll */
  367. intel_reference_shared_dpll(pll, crtc_state);
  368. return pll;
  369. }
  370. static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
  371. struct intel_dpll_hw_state *hw_state)
  372. {
  373. DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
  374. "fp0: 0x%x, fp1: 0x%x\n",
  375. hw_state->dpll,
  376. hw_state->dpll_md,
  377. hw_state->fp0,
  378. hw_state->fp1);
  379. }
  380. static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
  381. .prepare = ibx_pch_dpll_prepare,
  382. .enable = ibx_pch_dpll_enable,
  383. .disable = ibx_pch_dpll_disable,
  384. .get_hw_state = ibx_pch_dpll_get_hw_state,
  385. };
  386. static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
  387. struct intel_shared_dpll *pll)
  388. {
  389. I915_WRITE(WRPLL_CTL(pll->id), pll->state.hw_state.wrpll);
  390. POSTING_READ(WRPLL_CTL(pll->id));
  391. udelay(20);
  392. }
  393. static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
  394. struct intel_shared_dpll *pll)
  395. {
  396. I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
  397. POSTING_READ(SPLL_CTL);
  398. udelay(20);
  399. }
  400. static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
  401. struct intel_shared_dpll *pll)
  402. {
  403. uint32_t val;
  404. val = I915_READ(WRPLL_CTL(pll->id));
  405. I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
  406. POSTING_READ(WRPLL_CTL(pll->id));
  407. }
  408. static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
  409. struct intel_shared_dpll *pll)
  410. {
  411. uint32_t val;
  412. val = I915_READ(SPLL_CTL);
  413. I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
  414. POSTING_READ(SPLL_CTL);
  415. }
  416. static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
  417. struct intel_shared_dpll *pll,
  418. struct intel_dpll_hw_state *hw_state)
  419. {
  420. uint32_t val;
  421. if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
  422. return false;
  423. val = I915_READ(WRPLL_CTL(pll->id));
  424. hw_state->wrpll = val;
  425. intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
  426. return val & WRPLL_PLL_ENABLE;
  427. }
  428. static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
  429. struct intel_shared_dpll *pll,
  430. struct intel_dpll_hw_state *hw_state)
  431. {
  432. uint32_t val;
  433. if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
  434. return false;
  435. val = I915_READ(SPLL_CTL);
  436. hw_state->spll = val;
  437. intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
  438. return val & SPLL_PLL_ENABLE;
  439. }
/* LC PLL frequency in MHz; LC_FREQ_2K is the same in units of 2 kHz. */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post-divider (P) search range: even values from 2 to 64. */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
/* NOTE(review): presumably in MHz, matching LC_FREQ above — confirm. */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* Candidate WRPLL dividers: post (p), doubled feedback (n2), doubled
 * reference (r2).  p == 0 marks "no candidate yet". */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
  453. static unsigned hsw_wrpll_get_budget_for_freq(int clock)
  454. {
  455. unsigned budget;
  456. switch (clock) {
  457. case 25175000:
  458. case 25200000:
  459. case 27000000:
  460. case 27027000:
  461. case 37762500:
  462. case 37800000:
  463. case 40500000:
  464. case 40541000:
  465. case 54000000:
  466. case 54054000:
  467. case 59341000:
  468. case 59400000:
  469. case 72000000:
  470. case 74176000:
  471. case 74250000:
  472. case 81000000:
  473. case 81081000:
  474. case 89012000:
  475. case 89100000:
  476. case 108000000:
  477. case 108108000:
  478. case 111264000:
  479. case 111375000:
  480. case 148352000:
  481. case 148500000:
  482. case 162000000:
  483. case 162162000:
  484. case 222525000:
  485. case 222750000:
  486. case 296703000:
  487. case 297000000:
  488. budget = 0;
  489. break;
  490. case 233500000:
  491. case 245250000:
  492. case 247750000:
  493. case 253250000:
  494. case 298000000:
  495. budget = 1500;
  496. break;
  497. case 169128000:
  498. case 169500000:
  499. case 179500000:
  500. case 202000000:
  501. budget = 2000;
  502. break;
  503. case 256250000:
  504. case 262500000:
  505. case 270000000:
  506. case 272500000:
  507. case 273750000:
  508. case 280750000:
  509. case 281250000:
  510. case 286000000:
  511. case 291750000:
  512. budget = 4000;
  513. break;
  514. case 267250000:
  515. case 268500000:
  516. budget = 5000;
  517. break;
  518. default:
  519. budget = 1000;
  520. break;
  521. }
  522. return budget;
  523. }
/*
 * Score the candidate divider triple (r2, n2, p) for target frequency
 * @freq2k (in 2 kHz units) against the current *best, updating *best in
 * place when the candidate is preferable under the @budget ppm limit.
 */
static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
				 unsigned r2, unsigned n2, unsigned p,
				 struct hsw_wrpll_rnp *best)
{
	uint64_t a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution. However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/* Cross-multiplied comparison: a >= c means the candidate is within
	 * budget, b >= d means the current best is within budget. */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it? Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
  579. static void
  580. hsw_ddi_calculate_wrpll(int clock /* in Hz */,
  581. unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
  582. {
  583. uint64_t freq2k;
  584. unsigned p, n2, r2;
  585. struct hsw_wrpll_rnp best = { 0, 0, 0 };
  586. unsigned budget;
  587. freq2k = clock / 100;
  588. budget = hsw_wrpll_get_budget_for_freq(clock);
  589. /* Special case handling for 540 pixel clock: bypass WR PLL entirely
  590. * and directly pass the LC PLL to it. */
  591. if (freq2k == 5400000) {
  592. *n2_out = 2;
  593. *p_out = 1;
  594. *r2_out = 2;
  595. return;
  596. }
  597. /*
  598. * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
  599. * the WR PLL.
  600. *
  601. * We want R so that REF_MIN <= Ref <= REF_MAX.
  602. * Injecting R2 = 2 * R gives:
  603. * REF_MAX * r2 > LC_FREQ * 2 and
  604. * REF_MIN * r2 < LC_FREQ * 2
  605. *
  606. * Which means the desired boundaries for r2 are:
  607. * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
  608. *
  609. */
  610. for (r2 = LC_FREQ * 2 / REF_MAX + 1;
  611. r2 <= LC_FREQ * 2 / REF_MIN;
  612. r2++) {
  613. /*
  614. * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
  615. *
  616. * Once again we want VCO_MIN <= VCO <= VCO_MAX.
  617. * Injecting R2 = 2 * R and N2 = 2 * N, we get:
  618. * VCO_MAX * r2 > n2 * LC_FREQ and
  619. * VCO_MIN * r2 < n2 * LC_FREQ)
  620. *
  621. * Which means the desired boundaries for n2 are:
  622. * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
  623. */
  624. for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
  625. n2 <= VCO_MAX * r2 / LC_FREQ;
  626. n2++) {
  627. for (p = P_MIN; p <= P_MAX; p += P_INC)
  628. hsw_wrpll_update_rnp(freq2k, budget,
  629. r2, n2, p, &best);
  630. }
  631. }
  632. *n2_out = best.n2;
  633. *p_out = best.p;
  634. *r2_out = best.r2;
  635. }
  636. static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
  637. struct intel_crtc *crtc,
  638. struct intel_crtc_state *crtc_state)
  639. {
  640. struct intel_shared_dpll *pll;
  641. uint32_t val;
  642. unsigned int p, n2, r2;
  643. hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
  644. val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
  645. WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
  646. WRPLL_DIVIDER_POST(p);
  647. crtc_state->dpll_hw_state.wrpll = val;
  648. pll = intel_find_shared_dpll(crtc, crtc_state,
  649. DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
  650. if (!pll)
  651. return NULL;
  652. return pll;
  653. }
  654. static struct intel_shared_dpll *
  655. hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock)
  656. {
  657. struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
  658. struct intel_shared_dpll *pll;
  659. enum intel_dpll_id pll_id;
  660. switch (clock / 2) {
  661. case 81000:
  662. pll_id = DPLL_ID_LCPLL_810;
  663. break;
  664. case 135000:
  665. pll_id = DPLL_ID_LCPLL_1350;
  666. break;
  667. case 270000:
  668. pll_id = DPLL_ID_LCPLL_2700;
  669. break;
  670. default:
  671. DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
  672. return NULL;
  673. }
  674. pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
  675. if (!pll)
  676. return NULL;
  677. return pll;
  678. }
  679. static struct intel_shared_dpll *
  680. hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
  681. struct intel_encoder *encoder)
  682. {
  683. struct intel_shared_dpll *pll;
  684. int clock = crtc_state->port_clock;
  685. memset(&crtc_state->dpll_hw_state, 0,
  686. sizeof(crtc_state->dpll_hw_state));
  687. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
  688. pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);
  689. } else if (intel_crtc_has_dp_encoder(crtc_state)) {
  690. pll = hsw_ddi_dp_get_dpll(encoder, clock);
  691. } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
  692. if (WARN_ON(crtc_state->port_clock / 2 != 135000))
  693. return NULL;
  694. crtc_state->dpll_hw_state.spll =
  695. SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
  696. pll = intel_find_shared_dpll(crtc, crtc_state,
  697. DPLL_ID_SPLL, DPLL_ID_SPLL);
  698. } else {
  699. return NULL;
  700. }
  701. if (!pll)
  702. return NULL;
  703. intel_reference_shared_dpll(pll, crtc_state);
  704. return pll;
  705. }
/* Log the HSW/BDW PLL configuration words (WRPLL and SPLL) for debugging. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state)
{
	DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		      hw_state->wrpll, hw_state->spll);
}
/* Shared-DPLL vtable for the HSW/BDW WRPLLs (HDMI path). */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
};

/* Shared-DPLL vtable for the HSW/BDW SPLL (used for the analog/CRT path). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
};
/*
 * The fixed LCPLL taps used for DP are not controlled by the driver: the
 * LCPLL is expected to be set up by the BIOS (see intel_ddi_pll_init()),
 * so the enable/disable hooks are intentionally empty and get_hw_state
 * always reports success.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}

static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}

static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	/* No per-PLL state to read back for the fixed LCPLL taps. */
	return true;
}

/* Shared-DPLL vtable for the fixed HSW/BDW LCPLL link-rate taps. */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
};
/* Per-PLL control and configuration register addresses on SKL. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};

/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode (no cfgcr1/cfgcr2) */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
  770. static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
  771. struct intel_shared_dpll *pll)
  772. {
  773. uint32_t val;
  774. val = I915_READ(DPLL_CTRL1);
  775. val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) |
  776. DPLL_CTRL1_LINK_RATE_MASK(pll->id));
  777. val |= pll->state.hw_state.ctrl1 << (pll->id * 6);
  778. I915_WRITE(DPLL_CTRL1, val);
  779. POSTING_READ(DPLL_CTRL1);
  780. }
/*
 * Enable a SKL DPLL (1-3): write ctrl1, program cfgcr1/cfgcr2 from the
 * cached hw_state, set the enable bit and wait for the lock status bit.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	I915_WRITE(regs[pll->id].cfgcr1, pll->state.hw_state.cfgcr1);
	I915_WRITE(regs[pll->id].cfgcr2, pll->state.hw_state.cfgcr2);
	POSTING_READ(regs[pll->id].cfgcr1);
	POSTING_READ(regs[pll->id].cfgcr2);

	/* the enable bit is always bit 31 */
	I915_WRITE(regs[pll->id].ctl,
		   I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);

	/* Poll DPLL_STATUS until this PLL's lock bit is set. */
	if (intel_wait_for_register(dev_priv,
				    DPLL_STATUS,
				    DPLL_LOCK(pll->id),
				    DPLL_LOCK(pll->id),
				    5))
		DRM_ERROR("DPLL %d not locked\n", pll->id);
}
/*
 * DPLL0's only per-pipe configuration lives in DPLL_CTRL1; the PLL itself
 * is kept running (it drives CDCLK), so "enable" just writes ctrl1.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}

/* Disable a SKL DPLL (1-3) by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;

	/* the enable bit is always bit 31 */
	I915_WRITE(regs[pll->id].ctl,
		   I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
	POSTING_READ(regs[pll->id].ctl);
}

/* DPLL0 must stay enabled (it drives CDCLK), so there is nothing to do. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
/*
 * Read back a SKL DPLL's (1-3) current hardware state into hw_state.
 * Returns false when the PLLS power domain cannot be acquired or the PLL
 * is disabled; on success fills in ctrl1 and, in HDMI mode, cfgcr1/cfgcr2.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	bool ret;

	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	ret = false;

	val = I915_READ(regs[pll->id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	val = I915_READ(DPLL_CTRL1);
	/* Each PLL owns a 6-bit field of DPLL_CTRL1. */
	hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(pll->id)) {
		hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
		hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
	}
	ret = true;

out:
	/* Drop the power reference taken above on every exit path. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return ret;
}
/*
 * Read back DPLL0's ctrl1 field. DPLL0 drives CDCLK and is therefore
 * expected to always be enabled; a clear enable bit triggers a WARN and
 * a false return.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	bool ret;

	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = I915_READ(regs[pll->id].ctl);
	if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = I915_READ(DPLL_CTRL1);
	/* Extract this PLL's 6-bit field from DPLL_CTRL1. */
	hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return ret;
}
/* Running best-candidate state for the SKL WRPLL divider search. */
struct skl_wrpll_context {
	uint64_t min_deviation;		/* current minimal deviation */
	uint64_t central_freq;		/* chosen central freq */
	uint64_t dco_freq;		/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
  870. static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
  871. {
  872. memset(ctx, 0, sizeof(*ctx));
  873. ctx->min_deviation = U64_MAX;
  874. }
  875. /* DCO freq must be within +1%/-6% of the DCO central freq */
  876. #define SKL_DCO_MAX_PDEVIATION 100
  877. #define SKL_DCO_MAX_NDEVIATION 600
  878. static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
  879. uint64_t central_freq,
  880. uint64_t dco_freq,
  881. unsigned int divider)
  882. {
  883. uint64_t deviation;
  884. deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
  885. central_freq);
  886. /* positive deviation */
  887. if (dco_freq >= central_freq) {
  888. if (deviation < SKL_DCO_MAX_PDEVIATION &&
  889. deviation < ctx->min_deviation) {
  890. ctx->min_deviation = deviation;
  891. ctx->central_freq = central_freq;
  892. ctx->dco_freq = dco_freq;
  893. ctx->p = divider;
  894. }
  895. /* negative deviation */
  896. } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
  897. deviation < ctx->min_deviation) {
  898. ctx->min_deviation = deviation;
  899. ctx->central_freq = central_freq;
  900. ctx->dco_freq = dco_freq;
  901. ctx->p = divider;
  902. }
  903. }
  904. static void skl_wrpll_get_multipliers(unsigned int p,
  905. unsigned int *p0 /* out */,
  906. unsigned int *p1 /* out */,
  907. unsigned int *p2 /* out */)
  908. {
  909. /* even dividers */
  910. if (p % 2 == 0) {
  911. unsigned int half = p / 2;
  912. if (half == 1 || half == 2 || half == 3 || half == 5) {
  913. *p0 = 2;
  914. *p1 = 1;
  915. *p2 = half;
  916. } else if (half % 2 == 0) {
  917. *p0 = 2;
  918. *p1 = half / 2;
  919. *p2 = 2;
  920. } else if (half % 3 == 0) {
  921. *p0 = 3;
  922. *p1 = half / 3;
  923. *p2 = 2;
  924. } else if (half % 7 == 0) {
  925. *p0 = 7;
  926. *p1 = half / 7;
  927. *p2 = 2;
  928. }
  929. } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
  930. *p0 = 3;
  931. *p1 = 1;
  932. *p2 = p / 3;
  933. } else if (p == 5 || p == 7) {
  934. *p0 = p;
  935. *p1 = 1;
  936. *p2 = 1;
  937. } else if (p == 15) {
  938. *p0 = 3;
  939. *p1 = 1;
  940. *p2 = 5;
  941. } else if (p == 21) {
  942. *p0 = 7;
  943. *p1 = 1;
  944. *p2 = 3;
  945. } else if (p == 35) {
  946. *p0 = 7;
  947. *p1 = 1;
  948. *p2 = 5;
  949. }
  950. }
/* Register-level WRPLL parameters, as encoded into DPLL_CFGCR1/CFGCR2. */
struct skl_wrpll_params {
	uint32_t dco_fraction;	/* fractional DCO multiplier (x 0x8000) */
	uint32_t dco_integer;	/* integer DCO multiplier */
	uint32_t qdiv_ratio;	/* q divider ratio (p1) */
	uint32_t qdiv_mode;	/* 1 iff the q divider is active */
	uint32_t kdiv;		/* encoded k divider (p2) */
	uint32_t pdiv;		/* encoded p divider (p0) */
	uint32_t central_freq;	/* encoded DCO central frequency selector */
};
/*
 * Translate the chosen AFE clock (Hz), DCO central frequency and divider
 * stages p0/p1/p2 into the register encodings of skl_wrpll_params.
 * Only the divider values produced by skl_wrpll_get_multipliers() are
 * representable; anything else trips a WARN.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      uint64_t afe_clock,
				      uint64_t central_freq,
				      uint32_t p0, uint32_t p1, uint32_t p2)
{
	uint64_t dco_freq;

	/* Encode the central-frequency selector (last case needs no break). */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	/* Encode pdiv (p0); only 1, 2, 3 and 7 are representable. */
	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* Encode kdiv (p2); only 5, 2, 3 and 1 are representable. */
	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* The q divider is only engaged when its ratio is not 1. */
	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bspec
	 */
	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
	/* Fractional remainder of the /24 MHz division, scaled by 0x8000. */
	params->dco_fraction =
		div_u64((div_u64(dco_freq, 24) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
/*
 * Find WRPLL parameters for the given pixel clock (in Hz).
 *
 * Tries every legal post divider (even dividers first, then odd) against
 * each DCO central frequency, keeping the candidate with the smallest DCO
 * deviation (see skl_wrpll_try_divider()). If any even divider yields a
 * solution, odd dividers are skipped entirely. Returns false when no
 * divider keeps the DCO within tolerance.
 */
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			struct skl_wrpll_params *wrpll_params)
{
	uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	uint64_t dco_central_freq[3] = {8400000000ULL,
					9000000000ULL,
					9600000000ULL};
	static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
					     24, 28, 30, 32, 36, 40, 42, 44,
					     48, 52, 54, 56, 60, 64, 66, 68,
					     70, 72, 76, 78, 80, 84, 88, 90,
					     92, 96, 98 };
	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const int *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx;
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;

	skl_wrpll_context_init(&ctx);

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				uint64_t dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return false;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
				  p0, p1, p2);

	return true;
}
  1085. static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
  1086. struct intel_crtc_state *crtc_state,
  1087. int clock)
  1088. {
  1089. uint32_t ctrl1, cfgcr1, cfgcr2;
  1090. struct skl_wrpll_params wrpll_params = { 0, };
  1091. /*
  1092. * See comment in intel_dpll_hw_state to understand why we always use 0
  1093. * as the DPLL id in this function.
  1094. */
  1095. ctrl1 = DPLL_CTRL1_OVERRIDE(0);
  1096. ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
  1097. if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
  1098. return false;
  1099. cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
  1100. DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
  1101. wrpll_params.dco_integer;
  1102. cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
  1103. DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
  1104. DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
  1105. DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
  1106. wrpll_params.central_freq;
  1107. memset(&crtc_state->dpll_hw_state, 0,
  1108. sizeof(crtc_state->dpll_hw_state));
  1109. crtc_state->dpll_hw_state.ctrl1 = ctrl1;
  1110. crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
  1111. crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
  1112. return true;
  1113. }
  1114. static bool
  1115. skl_ddi_dp_set_dpll_hw_state(int clock,
  1116. struct intel_dpll_hw_state *dpll_hw_state)
  1117. {
  1118. uint32_t ctrl1;
  1119. /*
  1120. * See comment in intel_dpll_hw_state to understand why we always use 0
  1121. * as the DPLL id in this function.
  1122. */
  1123. ctrl1 = DPLL_CTRL1_OVERRIDE(0);
  1124. switch (clock / 2) {
  1125. case 81000:
  1126. ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
  1127. break;
  1128. case 135000:
  1129. ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
  1130. break;
  1131. case 270000:
  1132. ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
  1133. break;
  1134. /* eDP 1.4 rates */
  1135. case 162000:
  1136. ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
  1137. break;
  1138. case 108000:
  1139. ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
  1140. break;
  1141. case 216000:
  1142. ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
  1143. break;
  1144. }
  1145. dpll_hw_state->ctrl1 = ctrl1;
  1146. return true;
  1147. }
  1148. static struct intel_shared_dpll *
  1149. skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
  1150. struct intel_encoder *encoder)
  1151. {
  1152. struct intel_shared_dpll *pll;
  1153. int clock = crtc_state->port_clock;
  1154. bool bret;
  1155. struct intel_dpll_hw_state dpll_hw_state;
  1156. memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
  1157. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
  1158. bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
  1159. if (!bret) {
  1160. DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
  1161. return NULL;
  1162. }
  1163. } else if (intel_crtc_has_dp_encoder(crtc_state)) {
  1164. bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
  1165. if (!bret) {
  1166. DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
  1167. return NULL;
  1168. }
  1169. crtc_state->dpll_hw_state = dpll_hw_state;
  1170. } else {
  1171. return NULL;
  1172. }
  1173. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
  1174. pll = intel_find_shared_dpll(crtc, crtc_state,
  1175. DPLL_ID_SKL_DPLL0,
  1176. DPLL_ID_SKL_DPLL0);
  1177. else
  1178. pll = intel_find_shared_dpll(crtc, crtc_state,
  1179. DPLL_ID_SKL_DPLL1,
  1180. DPLL_ID_SKL_DPLL3);
  1181. if (!pll)
  1182. return NULL;
  1183. intel_reference_shared_dpll(pll, crtc_state);
  1184. return pll;
  1185. }
/* Log the SKL PLL configuration words for debugging. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state)
{
	DRM_DEBUG_KMS("dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}

/* Shared-DPLL vtable for SKL DPLL1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
};

/* Shared-DPLL vtable for DPLL0, which is only configured via DPLL_CTRL1. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
};
/*
 * Enable a BXT/GLK port PLL: select the non-SSC reference, (GLK only)
 * power the PLL up, program dividers and loop-filter values from the
 * cached hw_state, trigger a recalibration, set the enable bit and wait
 * for lock, then program lane staggering through the group register.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	uint32_t temp;
	enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			DRM_ERROR("Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);

	/* Lock threshold */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);

	/* DCO amplitude / override */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
	/* Note: reuses the value just written instead of re-reading. */
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
	POSTING_READ(BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		DRM_ERROR("PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
/*
 * Disable a BXT/GLK port PLL by clearing its enable bit; on GLK also power
 * the PLL down and wait for the power state bit to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
	uint32_t temp;

	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
	POSTING_READ(BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			DRM_ERROR("Power state not reset for PLL:%d\n", port);
	}
}
/*
 * Read back a BXT/GLK port PLL's register state into hw_state, masking
 * each value down to the bits the driver actually programs so it can be
 * compared against the cached state. Returns false when the PLLS power
 * domain cannot be acquired or the PLL is disabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
	uint32_t val;
	bool ret;
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	ret = false;

	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
				 hw_state->pcsdw12,
				 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return ret;
}
  1378. /* bxt clock parameters */
  1379. struct bxt_clk_div {
  1380. int clock;
  1381. uint32_t p1;
  1382. uint32_t p2;
  1383. uint32_t m2_int;
  1384. uint32_t m2_frac;
  1385. bool m2_frac_en;
  1386. uint32_t n;
  1387. int vco;
  1388. };
  1389. /* pre-calculated values for DP linkrates */
  1390. static const struct bxt_clk_div bxt_dp_clk_val[] = {
  1391. {162000, 4, 2, 32, 1677722, 1, 1},
  1392. {270000, 4, 1, 27, 0, 0, 1},
  1393. {540000, 2, 1, 27, 0, 0, 1},
  1394. {216000, 3, 2, 32, 1677722, 1, 1},
  1395. {243000, 4, 1, 24, 1258291, 1, 1},
  1396. {324000, 4, 1, 32, 1677722, 1, 1},
  1397. {432000, 3, 1, 32, 1677722, 1, 1}
  1398. };
  1399. static bool
  1400. bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
  1401. struct intel_crtc_state *crtc_state, int clock,
  1402. struct bxt_clk_div *clk_div)
  1403. {
  1404. struct dpll best_clock;
  1405. /* Calculate HDMI div */
  1406. /*
  1407. * FIXME: tie the following calculation into
  1408. * i9xx_crtc_compute_clock
  1409. */
  1410. if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
  1411. DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
  1412. clock, pipe_name(intel_crtc->pipe));
  1413. return false;
  1414. }
  1415. clk_div->p1 = best_clock.p1;
  1416. clk_div->p2 = best_clock.p2;
  1417. WARN_ON(best_clock.m1 != 2);
  1418. clk_div->n = best_clock.n;
  1419. clk_div->m2_int = best_clock.m2 >> 22;
  1420. clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
  1421. clk_div->m2_frac_en = clk_div->m2_frac != 0;
  1422. clk_div->vco = best_clock.vco;
  1423. return true;
  1424. }
  1425. static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
  1426. {
  1427. int i;
  1428. *clk_div = bxt_dp_clk_val[0];
  1429. for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
  1430. if (bxt_dp_clk_val[i].clock == clock) {
  1431. *clk_div = bxt_dp_clk_val[i];
  1432. break;
  1433. }
  1434. }
  1435. clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
  1436. }
/*
 * Translate clk_div into the BXT port-PLL register words in dpll_hw_state:
 * loop-filter coefficients are chosen by VCO band and lane staggering by
 * link clock. Returns false for a VCO outside the handled ranges.
 */
static bool bxt_ddi_set_dpll_hw_state(int clock,
				      struct bxt_clk_div *clk_div,
				      struct intel_dpll_hw_state *dpll_hw_state)
{
	int vco = clk_div->vco;
	uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
	uint32_t lanestagger;

	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		/* Exactly 5400000 is excluded from both ranges above and
		 * gets its own coefficient set. */
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		DRM_ERROR("Invalid VCO\n");
		return false;
	}

	/* Lane staggering scales with the link clock. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
  1491. static bool
  1492. bxt_ddi_dp_set_dpll_hw_state(int clock,
  1493. struct intel_dpll_hw_state *dpll_hw_state)
  1494. {
  1495. struct bxt_clk_div clk_div = {0};
  1496. bxt_ddi_dp_pll_dividers(clock, &clk_div);
  1497. return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
  1498. }
  1499. static bool
  1500. bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc,
  1501. struct intel_crtc_state *crtc_state, int clock,
  1502. struct intel_dpll_hw_state *dpll_hw_state)
  1503. {
  1504. struct bxt_clk_div clk_div = { };
  1505. bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div);
  1506. return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
  1507. }
/*
 * Select the PLL for a BXT/GLK crtc. PLLs are pre-allocated with a fixed
 * 1:1 port->PLL mapping, so no sharing search is needed: build the hw
 * state for the output type, then take the PLL matching the encoder port.
 */
static struct intel_shared_dpll *
bxt_get_dpll(struct intel_crtc *crtc,
		struct intel_crtc_state *crtc_state,
		struct intel_encoder *encoder)
{
	struct intel_dpll_hw_state dpll_hw_state = { };
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	int i, clock = crtc_state->port_clock;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
					    &dpll_hw_state))
		return NULL;

	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
		return NULL;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state = dpll_hw_state;

	/* 1:1 mapping between ports and PLLs */
	i = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, i);

	/* NOTE(review): pll is dereferenced without a NULL check below;
	 * this assumes intel_get_shared_dpll_by_id() always succeeds for a
	 * valid port id — confirm. */
	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
		      crtc->base.base.id, crtc->base.name, pll->name);

	intel_reference_shared_dpll(pll, crtc_state);

	return pll;
}
  1535. static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
  1536. struct intel_dpll_hw_state *hw_state)
  1537. {
  1538. DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
  1539. "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
  1540. "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
  1541. hw_state->ebb0,
  1542. hw_state->ebb4,
  1543. hw_state->pll0,
  1544. hw_state->pll1,
  1545. hw_state->pll2,
  1546. hw_state->pll3,
  1547. hw_state->pll6,
  1548. hw_state->pll8,
  1549. hw_state->pll9,
  1550. hw_state->pll10,
  1551. hw_state->pcsdw12);
  1552. }
/* Enable/disable/readback hooks for the BXT port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
};
/*
 * Sanity-check the BIOS-programmed LCPLL state on pre-gen9 DDI
 * platforms (HSW/BDW).  Only reports problems; never reprograms.
 */
static void intel_ddi_pll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_GEN(dev_priv) < 9) {
		uint32_t val = I915_READ(LCPLL_CTL);

		/*
		 * The LCPLL register should be turned on by the BIOS. For now
		 * let's just check its state and print errors in case
		 * something is wrong. Don't even try to turn it on.
		 */

		if (val & LCPLL_CD_SOURCE_FCLK)
			DRM_ERROR("CDCLK source is not LCPLL\n");

		if (val & LCPLL_PLL_DISABLE)
			DRM_ERROR("LCPLL is disabled\n");
	}
}
/*
 * struct dpll_info - static description of one shared DPLL.
 * @name: human-readable PLL name, used in debug output
 * @id: intel_dpll_id value; -1 terminates a dpll_info table
 * @funcs: enable/disable/get_hw_state hooks for this PLL
 * @flags: INTEL_DPLL_* flags (e.g. always-on PLLs)
 */
struct dpll_info {
	const char *name;
	const int id;
	const struct intel_shared_dpll_funcs *funcs;
	uint32_t flags;
};

/*
 * struct intel_dpll_mgr - per-platform shared DPLL management ops.
 * @dpll_info: table of this platform's PLLs, terminated by id == -1
 * @get_dpll: select (and reference) a DPLL for a CRTC/encoder combo
 * @dump_hw_state: write a hw state to the kernel log
 */
struct intel_dpll_mgr {
	const struct dpll_info *dpll_info;

	struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
					      struct intel_crtc_state *crtc_state,
					      struct intel_encoder *encoder);

	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state);
};
/* PLL table and manager for IBX/CPT PCH platforms. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", DPLL_ID_PCH_PLL_A, &ibx_pch_dpll_funcs, 0 },
	{ "PCH DPLL B", DPLL_ID_PCH_PLL_B, &ibx_pch_dpll_funcs, 0 },
	{ NULL, -1, NULL, 0 },
};

static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dpll = ibx_get_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
/*
 * PLL table and manager for HSW/BDW.  The WRPLLs and SPLL are
 * programmable; the three LCPLL entries are marked
 * INTEL_DPLL_ALWAYS_ON and are never enabled/disabled by the driver.
 */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1", DPLL_ID_WRPLL1, &hsw_ddi_wrpll_funcs, 0 },
	{ "WRPLL 2", DPLL_ID_WRPLL2, &hsw_ddi_wrpll_funcs, 0 },
	{ "SPLL", DPLL_ID_SPLL, &hsw_ddi_spll_funcs, 0 },
	{ "LCPLL 810", DPLL_ID_LCPLL_810, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", DPLL_ID_LCPLL_1350, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", DPLL_ID_LCPLL_2700, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
	{ NULL, -1, NULL, },
};

static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dpll = hsw_get_dpll,
	.dump_hw_state = hsw_dump_hw_state,
};
/*
 * PLL table and manager for SKL/KBL.  DPLL0 is always on (it also
 * drives CDCLK, hence the dedicated funcs); DPLL1-3 are allocatable.
 */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 },
	{ "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 },
	{ "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 },
	{ NULL, -1, NULL, },
};

static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dpll = skl_get_dpll,
	.dump_hw_state = skl_dump_hw_state,
};
/*
 * PLL table and manager for BXT.  The port PLLs reuse the SKL DPLL
 * ids; bxt_get_dpll maps ports to PLLs 1:1.
 */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", DPLL_ID_SKL_DPLL0, &bxt_ddi_pll_funcs, 0 },
	{ "PORT PLL B", DPLL_ID_SKL_DPLL1, &bxt_ddi_pll_funcs, 0 },
	{ "PORT PLL C", DPLL_ID_SKL_DPLL2, &bxt_ddi_pll_funcs, 0 },
	{ NULL, -1, NULL, },
};

static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dpll = bxt_get_dpll,
	.dump_hw_state = bxt_dump_hw_state,
};
/*
 * Enable a CNL DPLL following the numbered bspec power-up sequence:
 * power up the PLL, program CFGCR0/CFGCR1 from the cached hw state,
 * then enable the PLL and wait for lock.
 */
static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	uint32_t val;

	/* 1. Enable DPLL power in DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(pll->id));
	val |= PLL_POWER_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);

	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
	if (intel_wait_for_register(dev_priv,
				    CNL_DPLL_ENABLE(pll->id),
				    PLL_POWER_STATE,
				    PLL_POWER_STATE,
				    5))
		DRM_ERROR("PLL %d Power not enabled\n", pll->id);

	/*
	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
	 * select DP mode, and set DP link rate.
	 */
	val = pll->state.hw_state.cfgcr0;
	I915_WRITE(CNL_DPLL_CFGCR0(pll->id), val);

	/* 4. Read back to ensure writes completed */
	POSTING_READ(CNL_DPLL_CFGCR0(pll->id));

	/*
	 * Also program DPLL_CFGCR1, but avoid touching CFGCR1 if HDMI
	 * mode is not enabled (its contents only matter for the
	 * HDMI/WRPLL path).
	 */
	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
		val = pll->state.hw_state.cfgcr1;
		I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
		/* Read back to ensure writes completed */
		POSTING_READ(CNL_DPLL_CFGCR1(pll->id));
	}

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Enable DPLL in DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(pll->id));
	val |= PLL_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);

	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
	if (intel_wait_for_register(dev_priv,
				    CNL_DPLL_ENABLE(pll->id),
				    PLL_LOCK,
				    PLL_LOCK,
				    5))
		DRM_ERROR("PLL %d not locked\n", pll->id);

	/*
	 * 8. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/*
	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
	 * Done at intel_ddi_clk_select
	 */
}
/*
 * Disable a CNL DPLL following the numbered bspec power-down
 * sequence: disable the PLL, wait for unlock, then remove PLL power.
 */
static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	uint32_t val;

	/*
	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
	 * Done at intel_ddi_post_disable
	 */

	/*
	 * 2. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 3. Disable DPLL through DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(pll->id));
	val &= ~PLL_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);

	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
	if (intel_wait_for_register(dev_priv,
				    CNL_DPLL_ENABLE(pll->id),
				    PLL_LOCK,
				    0,
				    5))
		DRM_ERROR("PLL %d locked\n", pll->id);

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Disable DPLL power in DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(pll->id));
	val &= ~PLL_POWER_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);

	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
	if (intel_wait_for_register(dev_priv,
				    CNL_DPLL_ENABLE(pll->id),
				    PLL_POWER_STATE,
				    0,
				    5))
		DRM_ERROR("PLL %d Power not disabled\n", pll->id);
}
/*
 * Read back the current CNL DPLL configuration into @hw_state.
 *
 * Returns false if the PLL power domain cannot be acquired or the PLL
 * is disabled; true (with cfgcr0, and cfgcr1 for HDMI mode, filled in)
 * otherwise.  The power reference taken here is dropped before return.
 */
static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;
	bool ret;

	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	ret = false;

	val = I915_READ(CNL_DPLL_ENABLE(pll->id));
	if (!(val & PLL_ENABLE))
		goto out;

	val = I915_READ(CNL_DPLL_CFGCR0(pll->id));
	hw_state->cfgcr0 = val;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CFGCR0_HDMI_MODE) {
		hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll->id));
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return ret;
}
  1768. static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
  1769. int *qdiv, int *kdiv)
  1770. {
  1771. /* even dividers */
  1772. if (bestdiv % 2 == 0) {
  1773. if (bestdiv == 2) {
  1774. *pdiv = 2;
  1775. *qdiv = 1;
  1776. *kdiv = 1;
  1777. } else if (bestdiv % 4 == 0) {
  1778. *pdiv = 2;
  1779. *qdiv = bestdiv / 4;
  1780. *kdiv = 2;
  1781. } else if (bestdiv % 6 == 0) {
  1782. *pdiv = 3;
  1783. *qdiv = bestdiv / 6;
  1784. *kdiv = 2;
  1785. } else if (bestdiv % 5 == 0) {
  1786. *pdiv = 5;
  1787. *qdiv = bestdiv / 10;
  1788. *kdiv = 2;
  1789. } else if (bestdiv % 14 == 0) {
  1790. *pdiv = 7;
  1791. *qdiv = bestdiv / 14;
  1792. *kdiv = 2;
  1793. }
  1794. } else {
  1795. if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
  1796. *pdiv = bestdiv;
  1797. *qdiv = 1;
  1798. *kdiv = 1;
  1799. } else { /* 9, 15, 21 */
  1800. *pdiv = bestdiv / 3;
  1801. *qdiv = 1;
  1802. *kdiv = 3;
  1803. }
  1804. }
  1805. }
/*
 * Translate raw P/Q/K dividers and the DCO frequency into the
 * skl_wrpll_params field values consumed by the DPLL_CFGCR0/CFGCR1
 * macros (see cnl_ddi_hdmi_pll_dividers).
 */
static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	/* Map the K divider to its register-field encoding. */
	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* Map the P divider to its register-field encoding. */
	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* Qdiv != 1 is only produced together with kdiv == 2 (see
	 * cnl_wrpll_get_multipliers). */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* DCO = dco_freq / ref_freq, split as integer + 15-bit fraction. */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
/*
 * Choose the WRPLL divider for @clock: pick, from the supported
 * divider table, the one whose DCO frequency (afe_clock * divider)
 * lands inside [dco_min, dco_max] closest to the band center, then
 * derive the P/Q/K multipliers and populate @wrpll_params.
 *
 * Returns false if no divider puts the DCO in range.
 */
static bool
cnl_ddi_calculate_wrpll(int clock,
			struct drm_i915_private *dev_priv,
			struct skl_wrpll_params *wrpll_params)
{
	/* The AFE clock runs at 5x the port clock. */
	u32 afe_clock = clock * 5;
	u32 dco_min = 7998000;
	u32 dco_max = 10000000;
	u32 dco_mid = (dco_min + dco_max) / 2;

	/* Even dividers first, then the supported odd ones. */
	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
					18, 20, 24, 28, 30, 32,  36,  40,
					42, 44, 48, 50, 52, 54,  56,  60,
					64, 66, 68, 70, 72, 76,  78,  80,
					84, 88, 90, 92, 96, 98, 100, 102,
					 3,  5,  7,  9, 15, 21 };
	u32 dco, best_dco = 0, dco_centrality = 0;
	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		dco = afe_clock * dividers[d];

		if ((dco <= dco_max) && (dco >= dco_min)) {
			/*
			 * NOTE(review): dco - dco_mid is an unsigned
			 * subtraction that wraps when dco < dco_mid and
			 * relies on abs() recovering the magnitude; the
			 * in-range differences here fit in an int, but
			 * confirm against the kernel abs() definition.
			 */
			dco_centrality = abs(dco - dco_mid);

			if (dco_centrality < best_dco_centrality) {
				best_dco_centrality = dco_centrality;
				best_div = dividers[d];
				best_dco = dco;
			}
		}
	}

	if (best_div == 0)
		return false;

	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);

	cnl_wrpll_params_populate(wrpll_params, best_dco,
				  dev_priv->cdclk.hw.ref, pdiv, qdiv, kdiv);

	return true;
}
  1883. static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
  1884. struct intel_crtc_state *crtc_state,
  1885. int clock)
  1886. {
  1887. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1888. uint32_t cfgcr0, cfgcr1;
  1889. struct skl_wrpll_params wrpll_params = { 0, };
  1890. cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
  1891. if (!cnl_ddi_calculate_wrpll(clock, dev_priv, &wrpll_params))
  1892. return false;
  1893. cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
  1894. wrpll_params.dco_integer;
  1895. cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
  1896. DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
  1897. DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
  1898. DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
  1899. DPLL_CFGCR1_CENTRAL_FREQ;
  1900. memset(&crtc_state->dpll_hw_state, 0,
  1901. sizeof(crtc_state->dpll_hw_state));
  1902. crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
  1903. crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
  1904. return true;
  1905. }
  1906. static bool
  1907. cnl_ddi_dp_set_dpll_hw_state(int clock,
  1908. struct intel_dpll_hw_state *dpll_hw_state)
  1909. {
  1910. uint32_t cfgcr0;
  1911. cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
  1912. switch (clock / 2) {
  1913. case 81000:
  1914. cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
  1915. break;
  1916. case 135000:
  1917. cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
  1918. break;
  1919. case 270000:
  1920. cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
  1921. break;
  1922. /* eDP 1.4 rates */
  1923. case 162000:
  1924. cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
  1925. break;
  1926. case 108000:
  1927. cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
  1928. break;
  1929. case 216000:
  1930. cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
  1931. break;
  1932. case 324000:
  1933. /* Some SKUs may require elevated I/O voltage to support this */
  1934. cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
  1935. break;
  1936. case 405000:
  1937. /* Some SKUs may require elevated I/O voltage to support this */
  1938. cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
  1939. break;
  1940. }
  1941. dpll_hw_state->cfgcr0 = cfgcr0;
  1942. return true;
  1943. }
/*
 * Select and reference a shared DPLL for a CNL CRTC.
 *
 * Note the asymmetry: the HDMI path writes the PLL state into
 * @crtc_state directly (inside cnl_ddi_hdmi_pll_dividers), while the
 * DP path fills a local hw state and copies it into @crtc_state here.
 *
 * Returns NULL if no PLL state could be computed, the output type is
 * unsupported, or no free PLL is available.
 */
static struct intel_shared_dpll *
cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
	     struct intel_encoder *encoder)
{
	struct intel_shared_dpll *pll;
	int clock = crtc_state->port_clock;
	bool bret;
	struct intel_dpll_hw_state dpll_hw_state;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = cnl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
		if (!bret) {
			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
			return NULL;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = cnl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
		if (!bret) {
			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
			return NULL;
		}
		crtc_state->dpll_hw_state = dpll_hw_state;
	} else {
		DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
			      crtc_state->output_types);
		return NULL;
	}

	/* Any of DPLL0..DPLL2 may be picked. */
	pll = intel_find_shared_dpll(crtc, crtc_state,
				     DPLL_ID_SKL_DPLL0,
				     DPLL_ID_SKL_DPLL2);
	if (!pll) {
		DRM_DEBUG_KMS("No PLL selected\n");
		return NULL;
	}

	intel_reference_shared_dpll(pll, crtc_state);

	return pll;
}
  1981. static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
  1982. struct intel_dpll_hw_state *hw_state)
  1983. {
  1984. DRM_DEBUG_KMS("dpll_hw_state: "
  1985. "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
  1986. hw_state->cfgcr0,
  1987. hw_state->cfgcr1);
  1988. }
/* Enable/disable/readback hooks for the CNL DPLLs. */
static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
	.enable = cnl_ddi_pll_enable,
	.disable = cnl_ddi_pll_disable,
	.get_hw_state = cnl_ddi_pll_get_hw_state,
};

/* PLL table and manager for CNL: three allocatable DPLLs. */
static const struct dpll_info cnl_plls[] = {
	{ "DPLL 0", DPLL_ID_SKL_DPLL0, &cnl_ddi_pll_funcs, 0 },
	{ "DPLL 1", DPLL_ID_SKL_DPLL1, &cnl_ddi_pll_funcs, 0 },
	{ "DPLL 2", DPLL_ID_SKL_DPLL2, &cnl_ddi_pll_funcs, 0 },
	{ NULL, -1, NULL, },
};

static const struct intel_dpll_mgr cnl_pll_mgr = {
	.dpll_info = cnl_plls,
	.get_dpll = cnl_get_dpll,
	.dump_hw_state = cnl_dump_hw_state,
};
  2005. /**
  2006. * intel_shared_dpll_init - Initialize shared DPLLs
  2007. * @dev: drm device
  2008. *
  2009. * Initialize shared DPLLs for @dev.
  2010. */
  2011. void intel_shared_dpll_init(struct drm_device *dev)
  2012. {
  2013. struct drm_i915_private *dev_priv = to_i915(dev);
  2014. const struct intel_dpll_mgr *dpll_mgr = NULL;
  2015. const struct dpll_info *dpll_info;
  2016. int i;
  2017. if (IS_CANNONLAKE(dev_priv))
  2018. dpll_mgr = &cnl_pll_mgr;
  2019. else if (IS_GEN9_BC(dev_priv))
  2020. dpll_mgr = &skl_pll_mgr;
  2021. else if (IS_GEN9_LP(dev_priv))
  2022. dpll_mgr = &bxt_pll_mgr;
  2023. else if (HAS_DDI(dev_priv))
  2024. dpll_mgr = &hsw_pll_mgr;
  2025. else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
  2026. dpll_mgr = &pch_pll_mgr;
  2027. if (!dpll_mgr) {
  2028. dev_priv->num_shared_dpll = 0;
  2029. return;
  2030. }
  2031. dpll_info = dpll_mgr->dpll_info;
  2032. for (i = 0; dpll_info[i].id >= 0; i++) {
  2033. WARN_ON(i != dpll_info[i].id);
  2034. dev_priv->shared_dplls[i].id = dpll_info[i].id;
  2035. dev_priv->shared_dplls[i].name = dpll_info[i].name;
  2036. dev_priv->shared_dplls[i].funcs = *dpll_info[i].funcs;
  2037. dev_priv->shared_dplls[i].flags = dpll_info[i].flags;
  2038. }
  2039. dev_priv->dpll_mgr = dpll_mgr;
  2040. dev_priv->num_shared_dpll = i;
  2041. mutex_init(&dev_priv->dpll_lock);
  2042. BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
  2043. /* FIXME: Move this to a more suitable place */
  2044. if (HAS_DDI(dev_priv))
  2045. intel_ddi_pll_init(dev);
  2046. }
  2047. /**
  2048. * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
  2049. * @crtc: CRTC
  2050. * @crtc_state: atomic state for @crtc
  2051. * @encoder: encoder
  2052. *
  2053. * Find an appropriate DPLL for the given CRTC and encoder combination. A
  2054. * reference from the @crtc to the returned pll is registered in the atomic
  2055. * state. That configuration is made effective by calling
  2056. * intel_shared_dpll_swap_state(). The reference should be released by calling
  2057. * intel_release_shared_dpll().
  2058. *
  2059. * Returns:
  2060. * A shared DPLL to be used by @crtc and @encoder with the given @crtc_state.
  2061. */
  2062. struct intel_shared_dpll *
  2063. intel_get_shared_dpll(struct intel_crtc *crtc,
  2064. struct intel_crtc_state *crtc_state,
  2065. struct intel_encoder *encoder)
  2066. {
  2067. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  2068. const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
  2069. if (WARN_ON(!dpll_mgr))
  2070. return NULL;
  2071. return dpll_mgr->get_dpll(crtc, crtc_state, encoder);
  2072. }
  2073. /**
  2074. * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
  2075. * @dpll: dpll in use by @crtc
  2076. * @crtc: crtc
  2077. * @state: atomic state
  2078. *
  2079. * This function releases the reference from @crtc to @dpll from the
  2080. * atomic @state. The new configuration is made effective by calling
  2081. * intel_shared_dpll_swap_state().
  2082. */
  2083. void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
  2084. struct intel_crtc *crtc,
  2085. struct drm_atomic_state *state)
  2086. {
  2087. struct intel_shared_dpll_state *shared_dpll_state;
  2088. shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
  2089. shared_dpll_state[dpll->id].crtc_mask &= ~(1 << crtc->pipe);
  2090. }
  2091. /**
  2092. * intel_shared_dpll_dump_hw_state - write hw_state to dmesg
  2093. * @dev_priv: i915 drm device
  2094. * @hw_state: hw state to be written to the log
  2095. *
  2096. * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
  2097. */
  2098. void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
  2099. struct intel_dpll_hw_state *hw_state)
  2100. {
  2101. if (dev_priv->dpll_mgr) {
  2102. dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
  2103. } else {
  2104. /* fallback for platforms that don't use the shared dpll
  2105. * infrastructure
  2106. */
  2107. DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
  2108. "fp0: 0x%x, fp1: 0x%x\n",
  2109. hw_state->dpll,
  2110. hw_state->dpll_md,
  2111. hw_state->fp0,
  2112. hw_state->fp1);
  2113. }
  2114. }