@@ -3681,6 +3681,40 @@ static int ci_find_boot_level(struct ci_single_dpm_table *table,
 	return ret;
 }
 
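+/*
+ * Capture the level-0 graphics DPM settings built for the SMC as the default
+ * gfx power profile, then derive the default compute profile from it
+ * (restricted to the two highest sclk levels, 0 up / 5 down hysteresis).
+ */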
+static void ci_save_default_power_profile(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct SMU7_Discrete_GraphicsLevel *levels =
+		pi->smc_state_table.GraphicsLevel;
+	uint32_t min_level = 0;
+
+	pi->default_gfx_power_profile.activity_threshold =
+		be16_to_cpu(levels[0].ActivityLevel);
+	pi->default_gfx_power_profile.up_hyst = levels[0].UpH;
+	pi->default_gfx_power_profile.down_hyst = levels[0].DownH;
+	pi->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
+
+	pi->default_compute_power_profile = pi->default_gfx_power_profile;
+	pi->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
+
+	/* Optimize compute power profile: Use only highest
+	 * 2 power levels (if more than 2 are available), Hysteresis:
+	 * 0ms up, 5ms down
+	 */
+	if (pi->smc_state_table.GraphicsDpmLevelCount > 2)
+		min_level = pi->smc_state_table.GraphicsDpmLevelCount - 2;
+	else if (pi->smc_state_table.GraphicsDpmLevelCount == 2)
+		min_level = 1;
+	pi->default_compute_power_profile.min_sclk =
+		be32_to_cpu(levels[min_level].SclkFrequency);
+
+	pi->default_compute_power_profile.up_hyst = 0;
+	pi->default_compute_power_profile.down_hyst = 5;
+
+	pi->gfx_power_profile = pi->default_gfx_power_profile;
+	pi->compute_power_profile = pi->default_compute_power_profile;
+}
+
 static int ci_init_smc_table(struct amdgpu_device *adev)
 {
 	struct ci_power_info *pi = ci_get_pi(adev);
@@ -3826,6 +3860,8 @@ static int ci_init_smc_table(struct amdgpu_device *adev)
 	if (ret)
 		return ret;
 
+	ci_save_default_power_profile(adev);
+
 	return 0;
 }
 
@@ -6688,6 +6724,218 @@ static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
 	return 0;
 }
 
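+/*
+ * Copy the cached gfx or compute profile, selected by query->type, back to
+ * the caller.
+ */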
+static int ci_dpm_get_power_profile_state(struct amdgpu_device *adev,
+		struct amd_pp_profile *query)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	if (!pi || !query)
+		return -EINVAL;
+
+	if (query->type == AMD_PP_GFX_PROFILE)
+		memcpy(query, &pi->gfx_power_profile,
+				sizeof(struct amd_pp_profile));
+	else if (query->type == AMD_PP_COMPUTE_PROFILE)
+		memcpy(query, &pi->compute_power_profile,
+				sizeof(struct amd_pp_profile));
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
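+/*
+ * Write the requested activity threshold and up/down hysteresis into every
+ * graphics DPM level and upload the level table to the SMC.
+ */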
+static int ci_populate_requested_graphic_levels(struct amdgpu_device *adev,
+		struct amd_pp_profile *request)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct ci_dpm_table *dpm_table = &(pi->dpm_table);
+	struct SMU7_Discrete_GraphicsLevel *levels =
+		pi->smc_state_table.GraphicsLevel;
+	uint32_t array = pi->dpm_table_start +
+		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+	uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
+		SMU7_MAX_LEVELS_GRAPHICS;
+	uint32_t i;
+
+	for (i = 0; i < dpm_table->sclk_table.count; i++) {
+		levels[i].ActivityLevel =
+			cpu_to_be16(request->activity_threshold);
+		levels[i].EnabledForActivity = 1;
+		levels[i].UpH = request->up_hyst;
+		levels[i].DownH = request->down_hyst;
+	}
+
+	return amdgpu_ci_copy_bytes_to_smc(adev, array, (uint8_t *)levels,
+			array_size, pi->sram_end);
+}
+
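+/*
+ * Build bitmasks of the enabled sclk/mclk DPM levels whose clocks are at or
+ * above the requested minimums.
+ */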
+static void ci_find_min_clock_masks(struct amdgpu_device *adev,
+		uint32_t *sclk_mask, uint32_t *mclk_mask,
+		uint32_t min_sclk, uint32_t min_mclk)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct ci_dpm_table *dpm_table = &(pi->dpm_table);
+	uint32_t i;
+
+	for (i = 0; i < dpm_table->sclk_table.count; i++) {
+		if (dpm_table->sclk_table.dpm_levels[i].enabled &&
+		    dpm_table->sclk_table.dpm_levels[i].value >= min_sclk)
+			*sclk_mask |= 1 << i;
+	}
+
+	for (i = 0; i < dpm_table->mclk_table.count; i++) {
+		if (dpm_table->mclk_table.dpm_levels[i].enabled &&
+		    dpm_table->mclk_table.dpm_levels[i].value >= min_mclk)
+			*mclk_mask |= 1 << i;
+	}
+}
+
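+/*
+ * Apply a profile to the hardware: freeze sclk/mclk DPM, reprogram the
+ * graphics levels in the SMC, unfreeze, then restrict the enabled level
+ * masks so the requested minimum clocks are honored.
+ */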
+static int ci_set_power_profile_state(struct amdgpu_device *adev,
+		struct amd_pp_profile *request)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	int tmp_result, result = 0;
+	uint32_t sclk_mask = 0, mclk_mask = 0;
+
+	tmp_result = ci_freeze_sclk_mclk_dpm(adev);
+	if (tmp_result) {
+		DRM_ERROR("Failed to freeze SCLK MCLK DPM!");
+		result = tmp_result;
+	}
+
+	tmp_result = ci_populate_requested_graphic_levels(adev,
+			request);
+	if (tmp_result) {
+		DRM_ERROR("Failed to populate requested graphic levels!");
+		result = tmp_result;
+	}
+
+	tmp_result = ci_unfreeze_sclk_mclk_dpm(adev);
+	if (tmp_result) {
+		DRM_ERROR("Failed to unfreeze SCLK MCLK DPM!");
+		result = tmp_result;
+	}
+
+	ci_find_min_clock_masks(adev, &sclk_mask, &mclk_mask,
+			request->min_sclk, request->min_mclk);
+
+	if (sclk_mask) {
+		if (!pi->sclk_dpm_key_disabled)
+			amdgpu_ci_send_msg_to_smc_with_parameter(
+				adev,
+				PPSMC_MSG_SCLKDPM_SetEnabledMask,
+				pi->dpm_level_enable_mask.
+				sclk_dpm_enable_mask &
+				sclk_mask);
+	}
+
+	if (mclk_mask) {
+		if (!pi->mclk_dpm_key_disabled)
+			amdgpu_ci_send_msg_to_smc_with_parameter(
+				adev,
+				PPSMC_MSG_MCLKDPM_SetEnabledMask,
+				pi->dpm_level_enable_mask.
+				mclk_dpm_enable_mask &
+				mclk_mask);
+	}
+
+
+	return result;
+}
+
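+/*
+ * Update the cached profile of the requested type.  A request with any
+ * non-zero field replaces the cached profile and is applied immediately if
+ * that profile type is currently active; an all-zero request re-applies the
+ * cached profile of that type.  Only honored in auto DPM mode.
+ */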
+static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev,
+		struct amd_pp_profile *request)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	int ret = -1;
+
+	if (!pi || !request)
+		return -EINVAL;
+
+	if (adev->pm.dpm.forced_level !=
+			AMD_DPM_FORCED_LEVEL_AUTO)
+		return -EINVAL;
+
+	if (request->min_sclk ||
+			request->min_mclk ||
+			request->activity_threshold ||
+			request->up_hyst ||
+			request->down_hyst) {
+		if (request->type == AMD_PP_GFX_PROFILE)
+			memcpy(&pi->gfx_power_profile, request,
+					sizeof(struct amd_pp_profile));
+		else if (request->type == AMD_PP_COMPUTE_PROFILE)
+			memcpy(&pi->compute_power_profile, request,
+					sizeof(struct amd_pp_profile));
+		else
+			return -EINVAL;
+
+		if (request->type == pi->current_power_profile)
+			ret = ci_set_power_profile_state(
+					adev,
+					request);
+	} else {
+		/* set power profile if it exists */
+		switch (request->type) {
+		case AMD_PP_GFX_PROFILE:
+			ret = ci_set_power_profile_state(
+					adev,
+					&pi->gfx_power_profile);
+			break;
+		case AMD_PP_COMPUTE_PROFILE:
+			ret = ci_set_power_profile_state(
+					adev,
+					&pi->compute_power_profile);
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	if (!ret)
+		pi->current_power_profile = request->type;
+
+	return 0;
+}
+
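+/*
+ * Restore the default profile of the requested type and re-apply it through
+ * ci_dpm_set_power_profile_state().
+ */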
+static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev,
+		struct amd_pp_profile *request)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	if (!pi || !request)
+		return -EINVAL;
+
+	if (request->type == AMD_PP_GFX_PROFILE) {
+		pi->gfx_power_profile = pi->default_gfx_power_profile;
+		return ci_dpm_set_power_profile_state(adev,
+				&pi->gfx_power_profile);
+	} else if (request->type == AMD_PP_COMPUTE_PROFILE) {
+		pi->compute_power_profile =
+				pi->default_compute_power_profile;
+		return ci_dpm_set_power_profile_state(adev,
+				&pi->compute_power_profile);
+	} else
+		return -EINVAL;
+}
+
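+/*
+ * Switch the active profile type, re-applying the cached settings for that
+ * type, if it is not already the current one.
+ */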
+static int ci_dpm_switch_power_profile(struct amdgpu_device *adev,
+		enum amd_pp_profile_type type)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct amd_pp_profile request = {0};
+
+	if (!pi)
+		return -EINVAL;
+
+	if (pi->current_power_profile != type) {
+		request.type = type;
+		return ci_dpm_set_power_profile_state(adev, &request);
+	}
+
+	return 0;
+}
+
 const struct amd_ip_funcs ci_dpm_ip_funcs = {
 	.name = "ci_dpm",
 	.early_init = ci_dpm_early_init,
@@ -6730,6 +6978,10 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
 	.set_mclk_od = ci_dpm_set_mclk_od,
 	.check_state_equal = ci_check_state_equal,
 	.get_vce_clock_state = amdgpu_get_vce_clock_state,
+	.get_power_profile_state = ci_dpm_get_power_profile_state,
+	.set_power_profile_state = ci_dpm_set_power_profile_state,
+	.reset_power_profile_state = ci_dpm_reset_power_profile_state,
+	.switch_power_profile = ci_dpm_switch_power_profile,
 };
 
 static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)