/*
 * Copyright 2012-16 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dce_clocks.h"
#include "dm_services.h"
#include "reg_helper.h"
#include "fixed32_32.h"
#include "bios_parser_interface.h"
#include "dc.h"
#include "dce_abm.h"
#include "dmcu.h"
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "dcn_calcs.h"
#endif
#include "core_types.h"

#define TO_DCE_CLOCKS(clocks)\
	container_of(clocks, struct dce_disp_clk, base)

#define REG(reg) \
	(clk_dce->regs->reg)

#undef FN
#define FN(reg_name, field_name) \
	clk_dce->clk_shift->field_name, clk_dce->clk_mask->field_name

#define CTX \
	clk_dce->base.ctx
/* Max clock values for each state indexed by "enum clocks_state": */
static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
/* ClocksStateInvalid - should not be used */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateLow */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
/* ClocksStateNominal */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
/* ClocksStatePerformance */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };

static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/*ClocksStateLow*/
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };

static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
/*ClocksStateLow*/
{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };

static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateLow*/
{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };

/* Starting point for each divider range. */
enum dce_divider_range_start {
	DIVIDER_RANGE_01_START = 200, /* 2.00 */
	DIVIDER_RANGE_02_START = 1600, /* 16.00 */
	DIVIDER_RANGE_03_START = 3200, /* 32.00 */
	DIVIDER_RANGE_SCALE_FACTOR = 100 /* Results are scaled up by 100. */
};

/* Ranges for divider identifiers (Divider ID or DID)
 * mmDENTIST_DISPCLK_CNTL.DENTIST_DISPCLK_WDIVIDER */
enum dce_divider_id_register_setting {
	DIVIDER_RANGE_01_BASE_DIVIDER_ID = 0X08,
	DIVIDER_RANGE_02_BASE_DIVIDER_ID = 0X40,
	DIVIDER_RANGE_03_BASE_DIVIDER_ID = 0X60,
	DIVIDER_RANGE_MAX_DIVIDER_ID = 0X80
};

/* Step size between each divider within a range.
 * Incrementing the DENTIST_DISPCLK_WDIVIDER by one
 * will increment the divider by this much. */
enum dce_divider_range_step_size {
	DIVIDER_RANGE_01_STEP_SIZE = 25, /* 0.25 */
	DIVIDER_RANGE_02_STEP_SIZE = 50, /* 0.50 */
	DIVIDER_RANGE_03_STEP_SIZE = 100 /* 1.00 */
};
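
/* Worked example of the DID-to-divider mapping derived from the values
 * above: a DID of 0x0C falls in range 01, so the divider is
 * (0x0C - 0x08) * 0.25 + 2.00 = 3.00, returned as 300 because results are
 * scaled up by DIVIDER_RANGE_SCALE_FACTOR.
 */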

static bool dce_divider_range_construct(
	struct dce_divider_range *div_range,
	int range_start,
	int range_step,
	int did_min,
	int did_max)
{
	div_range->div_range_start = range_start;
	div_range->div_range_step = range_step;
	div_range->did_min = did_min;
	div_range->did_max = did_max;

	if (div_range->div_range_step == 0) {
		div_range->div_range_step = 1;
		/* div_range_step cannot be zero */
		BREAK_TO_DEBUGGER();
	}
	/* Calculate this based on the other inputs. */
	/* See DividerRange.h for an explanation of */
	/* the relationship between divider id (DID) and a divider. */
	/* Number of Divider IDs = (Maximum Divider ID - Minimum Divider ID) */
	/* Maximum divider identified in this range =
	 * (Number of Divider IDs) * Step size between dividers
	 * + The start of this range. */
	div_range->div_range_end = (did_max - did_min) * range_step
		+ range_start;
	return true;
}

static int dce_divider_range_calc_divider(
	struct dce_divider_range *div_range,
	int did)
{
	/* Is this DID within our range? */
	if ((did < div_range->did_min) || (did >= div_range->did_max))
		return INVALID_DIVIDER;

	return ((did - div_range->did_min) * div_range->div_range_step)
		+ div_range->div_range_start;
}

static int dce_divider_range_get_divider(
	struct dce_divider_range *div_range,
	int ranges_num,
	int did)
{
	int div = INVALID_DIVIDER;
	int i;

	for (i = 0; i < ranges_num; i++) {
		/* Calculate divider with given divider ID */
		div = dce_divider_range_calc_divider(&div_range[i], did);
		/* Found a valid return divider */
		if (div != INVALID_DIVIDER)
			break;
	}
	return div;
}

static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
{
	struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
	int dprefclk_wdivider;
	int dprefclk_src_sel;
	int dp_ref_clk_khz = 600000;
	int target_div = INVALID_DIVIDER;

	/* ASSERT DP Reference Clock source is from DFS */
	REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
	ASSERT(dprefclk_src_sel == 0);

	/* Read mmDENTIST_DISPCLK_CNTL to get the currently
	 * programmed DID DENTIST_DPREFCLK_WDIVIDER */
	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);

	/* Convert DENTIST_DPREFCLK_WDIVIDER to an actual divider */
	target_div = dce_divider_range_get_divider(
		clk_dce->divider_ranges,
		DIVIDER_RANGE_MAX,
		dprefclk_wdivider);

	if (target_div != INVALID_DIVIDER) {
		/* Calculate the current DFS clock, in kHz. */
		dp_ref_clk_khz = (DIVIDER_RANGE_SCALE_FACTOR
			* clk_dce->dentist_vco_freq_khz) / target_div;
	}
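	/* Example, assuming the default 3,600,000 kHz DENTIST VCO: a scaled
	 * divider of 600 (i.e. 6.00) gives 100 * 3600000 / 600 = 600000 kHz,
	 * matching the 600 MHz default above.
	 */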

	/* SW will adjust DP REF Clock average value for all purposes
	 * (DP DTO / DP Audio DTO and DP GTC)
	 * if clock is spread, for all cases:
	 * - if SS enabled on DP Ref clock and HW de-spreading enabled with SW
	 *   calculations for DS_INCR/DS_MODULO (this is planned to be the default case)
	 * - if SS enabled on DP Ref clock and HW de-spreading enabled with HW
	 *   calculations (not planned to be used, but the average clock should
	 *   still be valid)
	 * - if SS enabled on DP Ref clock and HW de-spreading disabled
	 *   (should not be the case with CIK) then SW should program all rates
	 *   generated according to the average value (as with previous ASICs)
	 */
	if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
		struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
				dal_fixed32_32_from_fraction(
					clk_dce->dprefclk_ss_percentage,
					clk_dce->dprefclk_ss_divider), 200);
		struct fixed32_32 adj_dp_ref_clk_khz;

		ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
				ss_percentage);
		adj_dp_ref_clk_khz =
			dal_fixed32_32_mul_int(
				ss_percentage,
				dp_ref_clk_khz);
		dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
	}

	return dp_ref_clk_khz;
}

/* TODO: This is the DCN DPREFCLK: it could be programmed by DENTIST via
 * VBIOS or by CLK0_CLK11 via SMU. For DCE120, it is always 600 MHz. Will
 * revisit the clock implementation.
 */
static int dce_clocks_get_dp_ref_freq_wrkaround(struct display_clock *clk)
{
	struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
	int dp_ref_clk_khz = 600000;

	if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
		struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
				dal_fixed32_32_from_fraction(
					clk_dce->dprefclk_ss_percentage,
					clk_dce->dprefclk_ss_divider), 200);
		struct fixed32_32 adj_dp_ref_clk_khz;

		ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
				ss_percentage);
		adj_dp_ref_clk_khz =
			dal_fixed32_32_mul_int(
				ss_percentage,
				dp_ref_clk_khz);
		dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
	}

	return dp_ref_clk_khz;
}

static enum dm_pp_clocks_state dce_get_required_clocks_state(
	struct display_clock *clk,
	struct state_dependent_clocks *req_clocks)
{
	struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
	int i;
	enum dm_pp_clocks_state low_req_clk;

	/* Iterate from highest supported to lowest valid state, and update
	 * lowest RequiredState with the lowest state that satisfies
	 * all required clocks
	 */
	for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
		if (req_clocks->display_clk_khz >
				clk_dce->max_clks_by_state[i].display_clk_khz
			|| req_clocks->pixel_clk_khz >
				clk_dce->max_clks_by_state[i].pixel_clk_khz)
			break;

	low_req_clk = i + 1;
	if (low_req_clk > clk->max_clks_state) {
		dm_logger_write(clk->ctx->logger, LOG_WARNING,
				"%s: clocks unsupported", __func__);
		low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
	}

	return low_req_clk;
}

static bool dce_clock_set_min_clocks_state(
	struct display_clock *clk,
	enum dm_pp_clocks_state clocks_state)
{
	struct dm_pp_power_level_change_request level_change_req = {
			clocks_state };

	if (clocks_state > clk->max_clks_state) {
		/* Requested state exceeds max supported state. */
		dm_logger_write(clk->ctx->logger, LOG_WARNING,
				"Requested state exceeds max supported state");
		return false;
	} else if (clocks_state == clk->cur_min_clks_state) {
		/* If we're trying to set the same state, just return,
		 * since nothing needs to be done */
		return true;
	}

	/* get max clock state from PPLIB */
	if (dm_pp_apply_power_level_change_request(clk->ctx, &level_change_req))
		clk->cur_min_clks_state = clocks_state;

	return true;
}

static int dce_set_clock(
	struct display_clock *clk,
	int requested_clk_khz)
{
	struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
	struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
	struct dc_bios *bp = clk->ctx->dc_bios;
	int actual_clock = requested_clk_khz;

	/* Make sure requested clock isn't lower than minimum threshold */
	if (requested_clk_khz > 0)
		requested_clk_khz = max(requested_clk_khz,
				clk_dce->dentist_vco_freq_khz / 64);
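	/* For example, with the default 3,600,000 kHz DENTIST VCO this clamps
	 * any non-zero request to at least 3600000 / 64 = 56250 kHz.
	 */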

	/* Prepare to program display clock */
	pxl_clk_params.target_pixel_clock = requested_clk_khz;
	pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;

	bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);

	if (clk_dce->dfs_bypass_enabled) {
		/* Cache the fixed display clock */
		clk_dce->dfs_bypass_disp_clk =
			pxl_clk_params.dfs_bypass_display_clock;
		actual_clock = pxl_clk_params.dfs_bypass_display_clock;
	}

	/* When powering down (requested clock is 0), mark the clock state as
	 * ClocksStateNominal coming from HW reset, so that on resume we will
	 * call the pplib voltage regulator. */
	if (requested_clk_khz == 0)
		clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;

	return actual_clock;
}

static int dce_psr_set_clock(
	struct display_clock *clk,
	int requested_clk_khz)
{
	struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
	struct dc_context *ctx = clk_dce->base.ctx;
	struct dc *core_dc = ctx->dc;
	struct dmcu *dmcu = core_dc->res_pool->dmcu;
	int actual_clk_khz = requested_clk_khz;

	actual_clk_khz = dce_set_clock(clk, requested_clk_khz);

	dmcu->funcs->set_psr_wait_loop(dmcu, actual_clk_khz / 1000 / 7);
	return actual_clk_khz;
}

static int dce112_set_clock(
	struct display_clock *clk,
	int requested_clk_khz)
{
	struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
	struct bp_set_dce_clock_parameters dce_clk_params;
	struct dc_bios *bp = clk->ctx->dc_bios;
	struct dc *core_dc = clk->ctx->dc;
	struct abm *abm = core_dc->res_pool->abm;
	struct dmcu *dmcu = core_dc->res_pool->dmcu;
	int actual_clock = requested_clk_khz;

	/* Prepare to program display clock */
	memset(&dce_clk_params, 0, sizeof(dce_clk_params));

	/* Make sure requested clock isn't lower than minimum threshold */
	if (requested_clk_khz > 0)
		requested_clk_khz = max(requested_clk_khz,
				clk_dce->dentist_vco_freq_khz / 62);

	dce_clk_params.target_clock_frequency = requested_clk_khz;
	dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
	dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;

	bp->funcs->set_dce_clock(bp, &dce_clk_params);
	actual_clock = dce_clk_params.target_clock_frequency;

	/* When powering down (requested clock is 0), mark the clock state as
	 * ClocksStateNominal coming from HW reset, so that on resume we will
	 * call the pplib voltage regulator. */
	if (requested_clk_khz == 0)
		clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;

	/* Program DP ref Clock */
	/* VBIOS will determine DPREFCLK frequency, so we don't set it */
	dce_clk_params.target_clock_frequency = 0;
	dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
	dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
			(dce_clk_params.pll_id ==
					CLOCK_SOURCE_COMBO_DISPLAY_PLL0);

	bp->funcs->set_dce_clock(bp, &dce_clk_params);

	if (abm->funcs->is_dmcu_initialized(abm) && clk_dce->dfs_bypass_disp_clk != actual_clock)
		dmcu->funcs->set_psr_wait_loop(dmcu,
				actual_clock / 1000 / 7);
	clk_dce->dfs_bypass_disp_clk = actual_clock;

	return actual_clock;
}

static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
{
	struct dc_debug *debug = &clk_dce->base.ctx->dc->debug;
	struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
	struct integrated_info info = { { { 0 } } };
	struct dc_firmware_info fw_info = { { 0 } };
	int i;

	if (bp->integrated_info)
		info = *bp->integrated_info;

	clk_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
	if (clk_dce->dentist_vco_freq_khz == 0) {
		bp->funcs->get_firmware_info(bp, &fw_info);
		clk_dce->dentist_vco_freq_khz =
			fw_info.smu_gpu_pll_output_freq;
		if (clk_dce->dentist_vco_freq_khz == 0)
			clk_dce->dentist_vco_freq_khz = 3600000;
	}

	/* Update the maximum display clock for each power state */
	for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
		enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;

		switch (i) {
		case 0:
			clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
			break;
		case 1:
			clk_state = DM_PP_CLOCKS_STATE_LOW;
			break;
		case 2:
			clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
			break;
		case 3:
			clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
			break;
		default:
			clk_state = DM_PP_CLOCKS_STATE_INVALID;
			break;
		}

		/* Do not allow a bad VBIOS/SBIOS to override with invalid
		 * values; check for > 100 MHz */
		if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
			clk_dce->max_clks_by_state[clk_state].display_clk_khz =
				info.disp_clk_voltage[i].max_supported_clk;
	}

	if (!debug->disable_dfs_bypass && bp->integrated_info)
		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
			clk_dce->dfs_bypass_enabled = true;

	clk_dce->use_max_disp_clk = debug->max_disp_clk;
}

static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
{
	struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
	int ss_info_num = bp->funcs->get_ss_entry_number(
			bp, AS_SIGNAL_TYPE_GPU_PLL);

	if (ss_info_num) {
		struct spread_spectrum_info info = { { 0 } };
		enum bp_result result = bp->funcs->get_spread_spectrum_info(
				bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);

		/* VBIOS will keep an entry for GPU PLL SS even if SS is not
		 * enabled; in that case a non-zero
		 * SSInfo.spreadSpectrumPercentage is the sign that SS is
		 * actually enabled.
		 */
		if (result == BP_RESULT_OK &&
				info.spread_spectrum_percentage != 0) {
			clk_dce->ss_on_dprefclk = true;
			clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;

			if (info.type.CENTER_MODE == 0) {
				/* TODO: Currently for the DP Reference clock
				 * we only need the SS percentage for
				 * downspread */
				clk_dce->dprefclk_ss_percentage =
						info.spread_spectrum_percentage;
			}

			return;
		}

		result = bp->funcs->get_spread_spectrum_info(
				bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);

		/* VBIOS will keep an entry for DPREFCLK SS even if SS is not
		 * enabled; in that case a non-zero
		 * SSInfo.spreadSpectrumPercentage is the sign that SS is
		 * actually enabled.
		 */
		if (result == BP_RESULT_OK &&
				info.spread_spectrum_percentage != 0) {
			clk_dce->ss_on_dprefclk = true;
			clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;

			if (info.type.CENTER_MODE == 0) {
				/* Currently for the DP Reference clock we
				 * only need the SS percentage for
				 * downspread */
				clk_dce->dprefclk_ss_percentage =
						info.spread_spectrum_percentage;
			}
		}
	}
}

static bool dce_apply_clock_voltage_request(
	struct display_clock *clk,
	enum dm_pp_clock_type clocks_type,
	int clocks_in_khz,
	bool pre_mode_set,
	bool update_dp_phyclk)
{
	bool send_request = false;
	struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};

	switch (clocks_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
	case DM_PP_CLOCK_TYPE_PIXELCLK:
	case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
		break;
	default:
		BREAK_TO_DEBUGGER();
		return false;
	}

	clock_voltage_req.clk_type = clocks_type;
	clock_voltage_req.clocks_in_khz = clocks_in_khz;

	/* to pplib */
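	/* Summary of the two-phase handshake below: before mode set, only
	 * clock increases are forwarded to pplib right away and the
	 * *_notify_pplib_done flags record that; after mode set, any request
	 * that was not sent in the first phase is forwarded.
	 */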
	if (pre_mode_set) {
		switch (clocks_type) {
		case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
			if (clocks_in_khz > clk->cur_clocks_value.dispclk_in_khz) {
				clk->cur_clocks_value.dispclk_notify_pplib_done = true;
				send_request = true;
			} else
				clk->cur_clocks_value.dispclk_notify_pplib_done = false;
			/* whether the clock increases or decreases, update the current clock value */
			clk->cur_clocks_value.dispclk_in_khz = clocks_in_khz;
			break;
		case DM_PP_CLOCK_TYPE_PIXELCLK:
			if (clocks_in_khz > clk->cur_clocks_value.max_pixelclk_in_khz) {
				clk->cur_clocks_value.pixelclk_notify_pplib_done = true;
				send_request = true;
			} else
				clk->cur_clocks_value.pixelclk_notify_pplib_done = false;
			/* whether the clock increases or decreases, update the current clock value */
			clk->cur_clocks_value.max_pixelclk_in_khz = clocks_in_khz;
			break;
		case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
			if (clocks_in_khz > clk->cur_clocks_value.max_non_dp_phyclk_in_khz) {
				clk->cur_clocks_value.phyclk_notigy_pplib_done = true;
				send_request = true;
			} else
				clk->cur_clocks_value.phyclk_notigy_pplib_done = false;
			/* whether the clock increases or decreases, update the current clock value */
			clk->cur_clocks_value.max_non_dp_phyclk_in_khz = clocks_in_khz;
			break;
		default:
			ASSERT(0);
			break;
		}
	} else {
		switch (clocks_type) {
		case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
			if (!clk->cur_clocks_value.dispclk_notify_pplib_done)
				send_request = true;
			break;
		case DM_PP_CLOCK_TYPE_PIXELCLK:
			if (!clk->cur_clocks_value.pixelclk_notify_pplib_done)
				send_request = true;
			break;
		case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
			if (!clk->cur_clocks_value.phyclk_notigy_pplib_done)
				send_request = true;
			break;
		default:
			ASSERT(0);
			break;
		}
	}

	if (send_request) {
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
		if (clk->ctx->dce_version >= DCN_VERSION_1_0) {
			struct dc *core_dc = clk->ctx->dc;
			/* use dcfclk to request voltage */
			clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
			clock_voltage_req.clocks_in_khz =
				dcn_find_dcfclk_suits_all(core_dc, &clk->cur_clocks_value);
		}
#endif
		dm_pp_apply_clock_for_voltage_request(
				clk->ctx, &clock_voltage_req);
	}

	if (update_dp_phyclk && (clocks_in_khz >
			clk->cur_clocks_value.max_dp_phyclk_in_khz))
		clk->cur_clocks_value.max_dp_phyclk_in_khz = clocks_in_khz;

	return true;
}

static const struct display_clock_funcs dce120_funcs = {
	.get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq_wrkaround,
	.apply_clock_voltage_request = dce_apply_clock_voltage_request,
	.set_clock = dce112_set_clock
};

static const struct display_clock_funcs dce112_funcs = {
	.get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
	.get_required_clocks_state = dce_get_required_clocks_state,
	.set_min_clocks_state = dce_clock_set_min_clocks_state,
	.set_clock = dce112_set_clock
};

static const struct display_clock_funcs dce110_funcs = {
	.get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
	.get_required_clocks_state = dce_get_required_clocks_state,
	.set_min_clocks_state = dce_clock_set_min_clocks_state,
	.set_clock = dce_psr_set_clock
};

static const struct display_clock_funcs dce_funcs = {
	.get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
	.get_required_clocks_state = dce_get_required_clocks_state,
	.set_min_clocks_state = dce_clock_set_min_clocks_state,
	.set_clock = dce_set_clock
};

static void dce_disp_clk_construct(
	struct dce_disp_clk *clk_dce,
	struct dc_context *ctx,
	const struct dce_disp_clk_registers *regs,
	const struct dce_disp_clk_shift *clk_shift,
	const struct dce_disp_clk_mask *clk_mask)
{
	struct display_clock *base = &clk_dce->base;

	base->ctx = ctx;
	base->funcs = &dce_funcs;

	clk_dce->regs = regs;
	clk_dce->clk_shift = clk_shift;
	clk_dce->clk_mask = clk_mask;

	clk_dce->dfs_bypass_disp_clk = 0;

	clk_dce->dprefclk_ss_percentage = 0;
	clk_dce->dprefclk_ss_divider = 1000;
	clk_dce->ss_on_dprefclk = false;

	base->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
	base->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;

	dce_clock_read_integrated_info(clk_dce);
	dce_clock_read_ss_info(clk_dce);

	dce_divider_range_construct(
		&clk_dce->divider_ranges[DIVIDER_RANGE_01],
		DIVIDER_RANGE_01_START,
		DIVIDER_RANGE_01_STEP_SIZE,
		DIVIDER_RANGE_01_BASE_DIVIDER_ID,
		DIVIDER_RANGE_02_BASE_DIVIDER_ID);
	dce_divider_range_construct(
		&clk_dce->divider_ranges[DIVIDER_RANGE_02],
		DIVIDER_RANGE_02_START,
		DIVIDER_RANGE_02_STEP_SIZE,
		DIVIDER_RANGE_02_BASE_DIVIDER_ID,
		DIVIDER_RANGE_03_BASE_DIVIDER_ID);
	dce_divider_range_construct(
		&clk_dce->divider_ranges[DIVIDER_RANGE_03],
		DIVIDER_RANGE_03_START,
		DIVIDER_RANGE_03_STEP_SIZE,
		DIVIDER_RANGE_03_BASE_DIVIDER_ID,
		DIVIDER_RANGE_MAX_DIVIDER_ID);
}

struct display_clock *dce_disp_clk_create(
	struct dc_context *ctx,
	const struct dce_disp_clk_registers *regs,
	const struct dce_disp_clk_shift *clk_shift,
	const struct dce_disp_clk_mask *clk_mask)
{
	struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);

	if (clk_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	memcpy(clk_dce->max_clks_by_state,
		dce80_max_clks_by_state,
		sizeof(dce80_max_clks_by_state));

	dce_disp_clk_construct(
		clk_dce, ctx, regs, clk_shift, clk_mask);

	return &clk_dce->base;
}

struct display_clock *dce110_disp_clk_create(
	struct dc_context *ctx,
	const struct dce_disp_clk_registers *regs,
	const struct dce_disp_clk_shift *clk_shift,
	const struct dce_disp_clk_mask *clk_mask)
{
	struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);

	if (clk_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	memcpy(clk_dce->max_clks_by_state,
		dce110_max_clks_by_state,
		sizeof(dce110_max_clks_by_state));

	dce_disp_clk_construct(
		clk_dce, ctx, regs, clk_shift, clk_mask);

	clk_dce->base.funcs = &dce110_funcs;

	return &clk_dce->base;
}

struct display_clock *dce112_disp_clk_create(
	struct dc_context *ctx,
	const struct dce_disp_clk_registers *regs,
	const struct dce_disp_clk_shift *clk_shift,
	const struct dce_disp_clk_mask *clk_mask)
{
	struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);

	if (clk_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	memcpy(clk_dce->max_clks_by_state,
		dce112_max_clks_by_state,
		sizeof(dce112_max_clks_by_state));

	dce_disp_clk_construct(
		clk_dce, ctx, regs, clk_shift, clk_mask);

	clk_dce->base.funcs = &dce112_funcs;

	return &clk_dce->base;
}

struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
{
	struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
	struct dm_pp_clock_levels_with_voltage clk_level_info = {0};

	if (clk_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	memcpy(clk_dce->max_clks_by_state,
		dce120_max_clks_by_state,
		sizeof(dce120_max_clks_by_state));

	dce_disp_clk_construct(
		clk_dce, ctx, NULL, NULL, NULL);

	clk_dce->base.funcs = &dce120_funcs;

	/* new in dce120 */
	if (!ctx->dc->debug.disable_pplib_clock_request &&
			dm_pp_get_clock_levels_by_type_with_voltage(
				ctx, DM_PP_CLOCK_TYPE_DISPLAY_CLK, &clk_level_info)
			&& clk_level_info.num_levels)
		clk_dce->max_displ_clk_in_khz =
			clk_level_info.data[clk_level_info.num_levels - 1].clocks_in_khz;
	else
		clk_dce->max_displ_clk_in_khz = 1133000;

	return &clk_dce->base;
}

void dce_disp_clk_destroy(struct display_clock **disp_clk)
{
	struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(*disp_clk);

	kfree(clk_dce);
	*disp_clk = NULL;
}