  1. /*
  2. * Copyright 2012-16 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Authors: AMD
  23. *
  24. */
#include "dce_clocks.h"
#include "dm_services.h"
#include "reg_helper.h"
#include "fixed32_32.h"
#include "bios_parser_interface.h"
#include "dc.h"

/* Recover the containing dce_disp_clk from its embedded display_clock base. */
#define TO_DCE_CLOCKS(clocks)\
	container_of(clocks, struct dce_disp_clk, base)

/* Register-address lookup used by the reg_helper macros (REG_GET/REG_UPDATE/
 * REG_WAIT); requires a local named clk_dce to be in scope. */
#define REG(reg) \
	(clk_dce->regs->reg)

#undef FN
/* Shift/mask pair lookup for a register field, consumed by reg_helper. */
#define FN(reg_name, field_name) \
	clk_dce->clk_shift->field_name, clk_dce->clk_mask->field_name

/* dc_context handle the reg_helper macros use for register access. */
#define CTX \
	clk_dce->base.ctx
/* Max clock values for each state indexed by "enum clocks_state":
 * per-ASIC tables of the highest display/pixel clock (kHz) each power
 * state can sustain; copied into dce_disp_clk::max_clks_by_state by the
 * per-ASIC create functions and possibly overridden by VBIOS data in
 * dce_clock_read_integrated_info(). */
static struct state_dependent_clocks dce80_max_clks_by_state[] = {
/* ClocksStateInvalid - should not be used */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateLow */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
/* ClocksStateNominal */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
/* ClocksStatePerformance */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };

static struct state_dependent_clocks dce110_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/*ClocksStateLow*/
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };

static struct state_dependent_clocks dce112_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
/*ClocksStateLow*/
{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
/* Starting point for each divider range.
 * All divider values are scaled by DIVIDER_RANGE_SCALE_FACTOR (x100),
 * so 200 represents a divider of 2.00. */
enum dce_divider_range_start {
	DIVIDER_RANGE_01_START = 200, /* 2.00*/
	DIVIDER_RANGE_02_START = 1600, /* 16.00*/
	DIVIDER_RANGE_03_START = 3200, /* 32.00*/
	DIVIDER_RANGE_SCALE_FACTOR = 100 /* Results are scaled up by 100.*/
};

/* Ranges for divider identifiers (Divider ID or DID)
 * mmDENTIST_DISPCLK_CNTL.DENTIST_DISPCLK_WDIVIDER.
 * Each value is the first DID of a range; the next range's base is this
 * range's exclusive upper bound. */
enum dce_divider_id_register_setting {
	DIVIDER_RANGE_01_BASE_DIVIDER_ID = 0X08,
	DIVIDER_RANGE_02_BASE_DIVIDER_ID = 0X40,
	DIVIDER_RANGE_03_BASE_DIVIDER_ID = 0X60,
	DIVIDER_RANGE_MAX_DIVIDER_ID = 0X80
};

/* Step size between each divider within a range.
 * Incrementing DENTIST_DISPCLK_WDIVIDER by one increments the divider by
 * this much (same x100 scaling as above). */
enum dce_divider_range_step_size {
	DIVIDER_RANGE_01_STEP_SIZE = 25, /* 0.25*/
	DIVIDER_RANGE_02_STEP_SIZE = 50, /* 0.50*/
	DIVIDER_RANGE_03_STEP_SIZE = 100 /* 1.00 */
};
  97. static bool dce_divider_range_construct(
  98. struct dce_divider_range *div_range,
  99. int range_start,
  100. int range_step,
  101. int did_min,
  102. int did_max)
  103. {
  104. div_range->div_range_start = range_start;
  105. div_range->div_range_step = range_step;
  106. div_range->did_min = did_min;
  107. div_range->did_max = did_max;
  108. if (div_range->div_range_step == 0) {
  109. div_range->div_range_step = 1;
  110. /*div_range_step cannot be zero*/
  111. BREAK_TO_DEBUGGER();
  112. }
  113. /* Calculate this based on the other inputs.*/
  114. /* See DividerRange.h for explanation of */
  115. /* the relationship between divider id (DID) and a divider.*/
  116. /* Number of Divider IDs = (Maximum Divider ID - Minimum Divider ID)*/
  117. /* Maximum divider identified in this range =
  118. * (Number of Divider IDs)*Step size between dividers
  119. * + The start of this range.*/
  120. div_range->div_range_end = (did_max - did_min) * range_step
  121. + range_start;
  122. return true;
  123. }
  124. static int dce_divider_range_calc_divider(
  125. struct dce_divider_range *div_range,
  126. int did)
  127. {
  128. /* Is this DID within our range?*/
  129. if ((did < div_range->did_min) || (did >= div_range->did_max))
  130. return INVALID_DIVIDER;
  131. return ((did - div_range->did_min) * div_range->div_range_step)
  132. + div_range->div_range_start;
  133. }
  134. static int dce_divider_range_get_divider(
  135. struct dce_divider_range *div_range,
  136. int ranges_num,
  137. int did)
  138. {
  139. int div = INVALID_DIVIDER;
  140. int i;
  141. for (i = 0; i < ranges_num; i++) {
  142. /* Calculate divider with given divider ID*/
  143. div = dce_divider_range_calc_divider(&div_range[i], did);
  144. /* Found a valid return divider*/
  145. if (div != INVALID_DIVIDER)
  146. break;
  147. }
  148. return div;
  149. }
  150. static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
  151. {
  152. struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
  153. int dprefclk_wdivider;
  154. int dprefclk_src_sel;
  155. int dp_ref_clk_khz = 600000;
  156. int target_div = INVALID_DIVIDER;
  157. /* ASSERT DP Reference Clock source is from DFS*/
  158. REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
  159. ASSERT(dprefclk_src_sel == 0);
  160. /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
  161. * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
  162. REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
  163. /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
  164. target_div = dce_divider_range_get_divider(
  165. clk_dce->divider_ranges,
  166. DIVIDER_RANGE_MAX,
  167. dprefclk_wdivider);
  168. if (target_div != INVALID_DIVIDER) {
  169. /* Calculate the current DFS clock, in kHz.*/
  170. dp_ref_clk_khz = (DIVIDER_RANGE_SCALE_FACTOR
  171. * clk_dce->dentist_vco_freq_khz) / target_div;
  172. }
  173. /* SW will adjust DP REF Clock average value for all purposes
  174. * (DP DTO / DP Audio DTO and DP GTC)
  175. if clock is spread for all cases:
  176. -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
  177. calculations for DS_INCR/DS_MODULO (this is planned to be default case)
  178. -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
  179. calculations (not planned to be used, but average clock should still
  180. be valid)
  181. -if SS enabled on DP Ref clock and HW de-spreading disabled
  182. (should not be case with CIK) then SW should program all rates
  183. generated according to average value (case as with previous ASICs)
  184. */
  185. if (clk_dce->ss_on_gpu_pll && clk_dce->gpu_pll_ss_divider != 0) {
  186. struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
  187. dal_fixed32_32_from_fraction(
  188. clk_dce->gpu_pll_ss_percentage,
  189. clk_dce->gpu_pll_ss_divider), 200);
  190. struct fixed32_32 adj_dp_ref_clk_khz;
  191. ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
  192. ss_percentage);
  193. adj_dp_ref_clk_khz =
  194. dal_fixed32_32_mul_int(
  195. ss_percentage,
  196. dp_ref_clk_khz);
  197. dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
  198. }
  199. return dp_ref_clk_khz;
  200. }
  201. static enum dm_pp_clocks_state dce_get_required_clocks_state(
  202. struct display_clock *clk,
  203. struct state_dependent_clocks *req_clocks)
  204. {
  205. struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
  206. int i;
  207. enum dm_pp_clocks_state low_req_clk;
  208. /* Iterate from highest supported to lowest valid state, and update
  209. * lowest RequiredState with the lowest state that satisfies
  210. * all required clocks
  211. */
  212. for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
  213. if (req_clocks->display_clk_khz >
  214. clk_dce->max_clks_by_state[i].display_clk_khz
  215. || req_clocks->pixel_clk_khz >
  216. clk_dce->max_clks_by_state[i].pixel_clk_khz)
  217. break;
  218. low_req_clk = i + 1;
  219. if (low_req_clk > clk->max_clks_state) {
  220. dm_logger_write(clk->ctx->logger, LOG_WARNING,
  221. "%s: clocks unsupported", __func__);
  222. low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
  223. }
  224. return low_req_clk;
  225. }
  226. static bool dce_clock_set_min_clocks_state(
  227. struct display_clock *clk,
  228. enum dm_pp_clocks_state clocks_state)
  229. {
  230. struct dm_pp_power_level_change_request level_change_req = {
  231. clocks_state };
  232. if (clocks_state > clk->max_clks_state) {
  233. /*Requested state exceeds max supported state.*/
  234. dm_logger_write(clk->ctx->logger, LOG_WARNING,
  235. "Requested state exceeds max supported state");
  236. return false;
  237. } else if (clocks_state == clk->cur_min_clks_state) {
  238. /*if we're trying to set the same state, we can just return
  239. * since nothing needs to be done*/
  240. return true;
  241. }
  242. /* get max clock state from PPLIB */
  243. if (dm_pp_apply_power_level_change_request(clk->ctx, &level_change_req))
  244. clk->cur_min_clks_state = clocks_state;
  245. return true;
  246. }
  247. static void dce_set_clock(
  248. struct display_clock *clk,
  249. int requested_clk_khz)
  250. {
  251. struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
  252. struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
  253. struct dc_bios *bp = clk->ctx->dc_bios;
  254. /* Make sure requested clock isn't lower than minimum threshold*/
  255. if (requested_clk_khz > 0)
  256. requested_clk_khz = dm_max(requested_clk_khz,
  257. clk_dce->dentist_vco_freq_khz / 64);
  258. /* Prepare to program display clock*/
  259. pxl_clk_params.target_pixel_clock = requested_clk_khz;
  260. pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
  261. bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
  262. if (clk_dce->dfs_bypass_enabled) {
  263. /* Cache the fixed display clock*/
  264. clk_dce->dfs_bypass_disp_clk =
  265. pxl_clk_params.dfs_bypass_display_clock;
  266. }
  267. /* from power down, we need mark the clock state as ClocksStateNominal
  268. * from HWReset, so when resume we will call pplib voltage regulator.*/
  269. if (requested_clk_khz == 0)
  270. clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
  271. }
/* DMCU command byte: set the PSR wait-loop count. */
#define PSR_SET_WAITLOOP 0x31

/* Payload layout of MASTER_COMM_DATA_REG1 for the PSR_SET_WAITLOOP
 * command; only the low 16 bits carry the wait-loop count. */
union dce110_dmcu_psr_config_data_wait_loop_reg1 {
	struct {
		unsigned int wait_loop:16; /* [15:0] */
		unsigned int reserved:16; /* [31:16] */
	} bits;
	unsigned int u32;
};
  280. static void dce_psr_wait_loop(
  281. struct dce_disp_clk *clk_dce, unsigned int display_clk_khz)
  282. {
  283. struct dc_context *ctx = clk_dce->base.ctx;
  284. union dce110_dmcu_psr_config_data_wait_loop_reg1 masterCmdData1;
  285. /* waitDMCUReadyForCmd */
  286. REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 100);
  287. masterCmdData1.u32 = 0;
  288. masterCmdData1.bits.wait_loop = display_clk_khz / 1000 / 7;
  289. dm_write_reg(ctx, REG(MASTER_COMM_DATA_REG1), masterCmdData1.u32);
  290. /* setDMCUParam_Cmd */
  291. REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_SET_WAITLOOP);
  292. /* notifyDMCUMsg */
  293. REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
  294. }
  295. static void dce_psr_set_clock(
  296. struct display_clock *clk,
  297. int requested_clk_khz)
  298. {
  299. struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
  300. dce_set_clock(clk, requested_clk_khz);
  301. dce_psr_wait_loop(clk_dce, requested_clk_khz);
  302. }
  303. static void dce112_set_clock(
  304. struct display_clock *clk,
  305. int requested_clk_khz)
  306. {
  307. struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
  308. struct bp_set_dce_clock_parameters dce_clk_params;
  309. struct dc_bios *bp = clk->ctx->dc_bios;
  310. /* Prepare to program display clock*/
  311. memset(&dce_clk_params, 0, sizeof(dce_clk_params));
  312. /* Make sure requested clock isn't lower than minimum threshold*/
  313. if (requested_clk_khz > 0)
  314. requested_clk_khz = dm_max(requested_clk_khz,
  315. clk_dce->dentist_vco_freq_khz / 62);
  316. dce_clk_params.target_clock_frequency = requested_clk_khz;
  317. dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
  318. dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
  319. bp->funcs->set_dce_clock(bp, &dce_clk_params);
  320. /* from power down, we need mark the clock state as ClocksStateNominal
  321. * from HWReset, so when resume we will call pplib voltage regulator.*/
  322. if (requested_clk_khz == 0)
  323. clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
  324. /*Program DP ref Clock*/
  325. /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
  326. dce_clk_params.target_clock_frequency = 0;
  327. dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
  328. dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
  329. (dce_clk_params.pll_id ==
  330. CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
  331. bp->funcs->set_dce_clock(bp, &dce_clk_params);
  332. }
  333. static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
  334. {
  335. struct dc_debug *debug = &clk_dce->base.ctx->dc->debug;
  336. struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
  337. struct integrated_info info = { 0 };
  338. struct firmware_info fw_info = { 0 };
  339. int i;
  340. if (bp->integrated_info)
  341. info = *bp->integrated_info;
  342. clk_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
  343. if (clk_dce->dentist_vco_freq_khz == 0) {
  344. bp->funcs->get_firmware_info(bp, &fw_info);
  345. clk_dce->dentist_vco_freq_khz =
  346. fw_info.smu_gpu_pll_output_freq;
  347. if (clk_dce->dentist_vco_freq_khz == 0)
  348. clk_dce->dentist_vco_freq_khz = 3600000;
  349. }
  350. /*update the maximum display clock for each power state*/
  351. for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
  352. enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
  353. switch (i) {
  354. case 0:
  355. clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
  356. break;
  357. case 1:
  358. clk_state = DM_PP_CLOCKS_STATE_LOW;
  359. break;
  360. case 2:
  361. clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
  362. break;
  363. case 3:
  364. clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
  365. break;
  366. default:
  367. clk_state = DM_PP_CLOCKS_STATE_INVALID;
  368. break;
  369. }
  370. /*Do not allow bad VBIOS/SBIOS to override with invalid values,
  371. * check for > 100MHz*/
  372. if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
  373. clk_dce->max_clks_by_state[clk_state].display_clk_khz =
  374. info.disp_clk_voltage[i].max_supported_clk;
  375. }
  376. if (!debug->disable_dfs_bypass && bp->integrated_info)
  377. if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
  378. clk_dce->dfs_bypass_enabled = true;
  379. clk_dce->use_max_disp_clk = debug->max_disp_clk;
  380. }
  381. static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
  382. {
  383. struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
  384. int ss_info_num = bp->funcs->get_ss_entry_number(
  385. bp, AS_SIGNAL_TYPE_GPU_PLL);
  386. if (ss_info_num) {
  387. struct spread_spectrum_info info = { 0 };
  388. enum bp_result result = bp->funcs->get_spread_spectrum_info(
  389. bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
  390. /* Based on VBIOS, VBIOS will keep entry for GPU PLL SS
  391. * even if SS not enabled and in that case
  392. * SSInfo.spreadSpectrumPercentage !=0 would be sign
  393. * that SS is enabled
  394. */
  395. if (result == BP_RESULT_OK &&
  396. info.spread_spectrum_percentage != 0) {
  397. clk_dce->ss_on_gpu_pll = true;
  398. clk_dce->gpu_pll_ss_divider = info.spread_percentage_divider;
  399. if (info.type.CENTER_MODE == 0) {
  400. /* Currently for DP Reference clock we
  401. * need only SS percentage for
  402. * downspread */
  403. clk_dce->gpu_pll_ss_percentage =
  404. info.spread_spectrum_percentage;
  405. }
  406. }
  407. }
  408. }
/* Per-ASIC vtables: all generations share the DP-ref-clock query and the
 * power-state helpers; they differ only in how set_clock programs VBIOS. */

/* DCE112: SetDceClock VBIOS interface. */
static const struct display_clock_funcs dce112_funcs = {
	.get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
	.get_required_clocks_state = dce_get_required_clocks_state,
	.set_min_clocks_state = dce_clock_set_min_clocks_state,
	.set_clock = dce112_set_clock
};

/* DCE110: legacy PLL programming plus a DMCU PSR wait-loop update. */
static const struct display_clock_funcs dce110_funcs = {
	.get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
	.get_required_clocks_state = dce_get_required_clocks_state,
	.set_min_clocks_state = dce_clock_set_min_clocks_state,
	.set_clock = dce_psr_set_clock
};

/* DCE8 default: legacy PLL programming only. */
static const struct display_clock_funcs dce_funcs = {
	.get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
	.get_required_clocks_state = dce_get_required_clocks_state,
	.set_min_clocks_state = dce_clock_set_min_clocks_state,
	.set_clock = dce_set_clock
};
  427. static void dce_disp_clk_construct(
  428. struct dce_disp_clk *clk_dce,
  429. struct dc_context *ctx,
  430. const struct dce_disp_clk_registers *regs,
  431. const struct dce_disp_clk_shift *clk_shift,
  432. const struct dce_disp_clk_mask *clk_mask)
  433. {
  434. struct display_clock *base = &clk_dce->base;
  435. base->ctx = ctx;
  436. base->funcs = &dce_funcs;
  437. clk_dce->regs = regs;
  438. clk_dce->clk_shift = clk_shift;
  439. clk_dce->clk_mask = clk_mask;
  440. clk_dce->dfs_bypass_disp_clk = 0;
  441. clk_dce->gpu_pll_ss_percentage = 0;
  442. clk_dce->gpu_pll_ss_divider = 1000;
  443. clk_dce->ss_on_gpu_pll = false;
  444. base->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
  445. base->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
  446. dce_clock_read_integrated_info(clk_dce);
  447. dce_clock_read_ss_info(clk_dce);
  448. dce_divider_range_construct(
  449. &clk_dce->divider_ranges[DIVIDER_RANGE_01],
  450. DIVIDER_RANGE_01_START,
  451. DIVIDER_RANGE_01_STEP_SIZE,
  452. DIVIDER_RANGE_01_BASE_DIVIDER_ID,
  453. DIVIDER_RANGE_02_BASE_DIVIDER_ID);
  454. dce_divider_range_construct(
  455. &clk_dce->divider_ranges[DIVIDER_RANGE_02],
  456. DIVIDER_RANGE_02_START,
  457. DIVIDER_RANGE_02_STEP_SIZE,
  458. DIVIDER_RANGE_02_BASE_DIVIDER_ID,
  459. DIVIDER_RANGE_03_BASE_DIVIDER_ID);
  460. dce_divider_range_construct(
  461. &clk_dce->divider_ranges[DIVIDER_RANGE_03],
  462. DIVIDER_RANGE_03_START,
  463. DIVIDER_RANGE_03_STEP_SIZE,
  464. DIVIDER_RANGE_03_BASE_DIVIDER_ID,
  465. DIVIDER_RANGE_MAX_DIVIDER_ID);
  466. }
  467. struct display_clock *dce_disp_clk_create(
  468. struct dc_context *ctx,
  469. const struct dce_disp_clk_registers *regs,
  470. const struct dce_disp_clk_shift *clk_shift,
  471. const struct dce_disp_clk_mask *clk_mask)
  472. {
  473. struct dce_disp_clk *clk_dce = dm_alloc(sizeof(*clk_dce));
  474. if (clk_dce == NULL) {
  475. BREAK_TO_DEBUGGER();
  476. return NULL;
  477. }
  478. memcpy(clk_dce->max_clks_by_state,
  479. dce80_max_clks_by_state,
  480. sizeof(dce80_max_clks_by_state));
  481. dce_disp_clk_construct(
  482. clk_dce, ctx, regs, clk_shift, clk_mask);
  483. return &clk_dce->base;
  484. }
  485. struct display_clock *dce110_disp_clk_create(
  486. struct dc_context *ctx,
  487. const struct dce_disp_clk_registers *regs,
  488. const struct dce_disp_clk_shift *clk_shift,
  489. const struct dce_disp_clk_mask *clk_mask)
  490. {
  491. struct dce_disp_clk *clk_dce = dm_alloc(sizeof(*clk_dce));
  492. if (clk_dce == NULL) {
  493. BREAK_TO_DEBUGGER();
  494. return NULL;
  495. }
  496. memcpy(clk_dce->max_clks_by_state,
  497. dce110_max_clks_by_state,
  498. sizeof(dce110_max_clks_by_state));
  499. dce_disp_clk_construct(
  500. clk_dce, ctx, regs, clk_shift, clk_mask);
  501. clk_dce->base.funcs = &dce110_funcs;
  502. return &clk_dce->base;
  503. }
  504. struct display_clock *dce112_disp_clk_create(
  505. struct dc_context *ctx,
  506. const struct dce_disp_clk_registers *regs,
  507. const struct dce_disp_clk_shift *clk_shift,
  508. const struct dce_disp_clk_mask *clk_mask)
  509. {
  510. struct dce_disp_clk *clk_dce = dm_alloc(sizeof(*clk_dce));
  511. if (clk_dce == NULL) {
  512. BREAK_TO_DEBUGGER();
  513. return NULL;
  514. }
  515. memcpy(clk_dce->max_clks_by_state,
  516. dce112_max_clks_by_state,
  517. sizeof(dce112_max_clks_by_state));
  518. dce_disp_clk_construct(
  519. clk_dce, ctx, regs, clk_shift, clk_mask);
  520. clk_dce->base.funcs = &dce112_funcs;
  521. return &clk_dce->base;
  522. }
  523. void dce_disp_clk_destroy(struct display_clock **disp_clk)
  524. {
  525. struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(*disp_clk);
  526. dm_free(clk_dce);
  527. *disp_clk = NULL;
  528. }