drm/amd/powerplay: revise calling chain on retrieving frequency range

This helps to maintain clear code layers and drop unnecessary
parameter.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Evan Quan 2020-06-09 16:16:56 +08:00 committed by Alex Deucher
parent c98f31d17c
commit e5ef784b1e
9 changed files with 87 additions and 59 deletions

View file

@@ -1461,7 +1461,7 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
} }
if (is_support_sw_smu(adev)) { if (is_support_sw_smu(adev)) {
ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq, true); ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq);
if (ret || val > max_freq || val < min_freq) if (ret || val > max_freq || val < min_freq)
return -EINVAL; return -EINVAL;
ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val); ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val);

View file

@@ -911,8 +911,7 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
if (is_support_sw_smu(adev)) { if (is_support_sw_smu(adev)) {
ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK, ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
low ? &clk_freq : NULL, low ? &clk_freq : NULL,
!low ? &clk_freq : NULL, !low ? &clk_freq : NULL);
true);
if (ret) if (ret)
return 0; return 0;
return clk_freq * 100; return clk_freq * 100;
@@ -929,8 +928,7 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
if (is_support_sw_smu(adev)) { if (is_support_sw_smu(adev)) {
ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK, ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
low ? &clk_freq : NULL, low ? &clk_freq : NULL,
!low ? &clk_freq : NULL, !low ? &clk_freq : NULL);
true);
if (ret) if (ret)
return 0; return 0;
return clk_freq * 100; return clk_freq * 100;

View file

@@ -261,51 +261,25 @@ int smu_set_soft_freq_range(struct smu_context *smu,
return ret; return ret;
} }
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, int smu_get_dpm_freq_range(struct smu_context *smu,
uint32_t *min, uint32_t *max, bool lock_needed) enum smu_clk_type clk_type,
uint32_t *min,
uint32_t *max)
{ {
uint32_t clock_limit;
int ret = 0; int ret = 0;
if (!min && !max) if (!min && !max)
return -EINVAL; return -EINVAL;
if (lock_needed) mutex_lock(&smu->mutex);
mutex_lock(&smu->mutex);
if (!smu_clk_dpm_is_enabled(smu, clk_type)) { if (smu->ppt_funcs->get_dpm_ultimate_freq)
switch (clk_type) { ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
case SMU_MCLK: clk_type,
case SMU_UCLK: min,
clock_limit = smu->smu_table.boot_values.uclk; max);
break;
case SMU_GFXCLK:
case SMU_SCLK:
clock_limit = smu->smu_table.boot_values.gfxclk;
break;
case SMU_SOCCLK:
clock_limit = smu->smu_table.boot_values.socclk;
break;
default:
clock_limit = 0;
break;
}
/* clock in Mhz unit */ mutex_unlock(&smu->mutex);
if (min)
*min = clock_limit / 100;
if (max)
*max = clock_limit / 100;
} else {
/*
* Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the
* core driver and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs).
*/
ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
}
if (lock_needed)
mutex_unlock(&smu->mutex);
return ret; return ret;
} }

View file

@@ -720,7 +720,7 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_typ
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type, int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *value); uint32_t *value);
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max, bool lock_needed); uint32_t *min, uint32_t *max);
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max); uint32_t min, uint32_t max);
int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type, int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,

View file

@@ -1082,13 +1082,13 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
int ret = 0; int ret = 0;
uint32_t min_sclk_freq = 0, min_mclk_freq = 0; uint32_t min_sclk_freq = 0, min_mclk_freq = 0;
ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL, false); ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &min_sclk_freq, NULL);
if (ret) if (ret)
return ret; return ret;
smu->pstate_sclk = min_sclk_freq * 100; smu->pstate_sclk = min_sclk_freq * 100;
ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL, false); ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_MCLK, &min_mclk_freq, NULL);
if (ret) if (ret)
return ret; return ret;
@@ -1143,7 +1143,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
return ret; return ret;
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false); ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_UCLK, NULL, &max_freq);
if (ret) if (ret)
return ret; return ret;
ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, max_freq); ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, max_freq);
@@ -1185,7 +1185,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
for (i = 0; i < ARRAY_SIZE(clks); i++) { for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i]; clk_type = clks[i];
ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); ret = smu_v11_0_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
if (ret) if (ret)
return ret; return ret;
@@ -1212,7 +1212,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu)
for (i = 0; i < ARRAY_SIZE(clks); i++) { for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i]; clk_type = clks[i];
ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); ret = smu_v11_0_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
if (ret) if (ret)
return ret; return ret;

View file

@@ -264,7 +264,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
case SMU_SCLK: case SMU_SCLK:
/* retirve table returned paramters unit is MHz */ /* retirve table returned paramters unit is MHz */
cur_value = metrics.ClockFrequency[CLOCK_GFXCLK]; cur_value = metrics.ClockFrequency[CLOCK_GFXCLK];
ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max, false); ret = smu_v12_0_get_dpm_ultimate_freq(smu, SMU_GFXCLK, &min, &max);
if (!ret) { if (!ret) {
/* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */ /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
if (cur_value == max) if (cur_value == max)
@@ -434,7 +434,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
for (i = 0; i < ARRAY_SIZE(clks); i++) { for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i]; clk_type = clks[i];
ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); ret = smu_v12_0_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
if (ret) if (ret)
return ret; return ret;
@@ -468,7 +468,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
clk_type = clk_feature_map[i].clk_type; clk_type = clk_feature_map[i].clk_type;
ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); ret = smu_v12_0_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
if (ret) if (ret)
return ret; return ret;
@@ -633,7 +633,7 @@ static int renoir_force_clk_levels(struct smu_context *smu,
return -EINVAL; return -EINVAL;
} }
ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq, false); ret = smu_v12_0_get_dpm_ultimate_freq(smu, SMU_GFXCLK, &min_freq, &max_freq);
if (ret) if (ret)
return ret; return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
@@ -716,7 +716,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
int ret = 0; int ret = 0;
uint32_t sclk_freq = 0, uclk_freq = 0; uint32_t sclk_freq = 0, uclk_freq = 0;
ret = smu_get_dpm_freq_range(smu, SMU_SCLK, NULL, &sclk_freq, false); ret = smu_v12_0_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_freq);
if (ret) if (ret)
return ret; return ret;
@@ -724,7 +724,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
if (ret) if (ret)
return ret; return ret;
ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &uclk_freq, false); ret = smu_v12_0_get_dpm_ultimate_freq(smu, SMU_UCLK, NULL, &uclk_freq);
if (ret) if (ret)
return ret; return ret;

View file

@@ -929,13 +929,13 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
int ret = 0; int ret = 0;
uint32_t min_sclk_freq = 0, min_mclk_freq = 0; uint32_t min_sclk_freq = 0, min_mclk_freq = 0;
ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL, false); ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &min_sclk_freq, NULL);
if (ret) if (ret)
return ret; return ret;
smu->pstate_sclk = min_sclk_freq * 100; smu->pstate_sclk = min_sclk_freq * 100;
ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL, false); ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_MCLK, &min_mclk_freq, NULL);
if (ret) if (ret)
return ret; return ret;
@@ -958,7 +958,7 @@ static int sienna_cichlid_pre_display_config_changed(struct smu_context *smu)
#endif #endif
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false); ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_UCLK, NULL, &max_freq);
if (ret) if (ret)
return ret; return ret;
ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, max_freq); ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, max_freq);
@@ -1002,7 +1002,7 @@ static int sienna_cichlid_force_dpm_limit_value(struct smu_context *smu, bool hi
for (i = 0; i < ARRAY_SIZE(clks); i++) { for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i]; clk_type = clks[i];
ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); ret = smu_v11_0_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
if (ret) if (ret)
return ret; return ret;
@@ -1029,7 +1029,7 @@ static int sienna_cichlid_unforce_dpm_levels(struct smu_context *smu)
for (i = 0; i < ARRAY_SIZE(clks); i++) { for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i]; clk_type = clks[i];
ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); ret = smu_v11_0_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
if (ret) if (ret)
return ret; return ret;

View file

@@ -1699,6 +1699,34 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
{ {
int ret = 0, clk_id = 0; int ret = 0, clk_id = 0;
uint32_t param = 0; uint32_t param = 0;
uint32_t clock_limit;
if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
switch (clk_type) {
case SMU_MCLK:
case SMU_UCLK:
clock_limit = smu->smu_table.boot_values.uclk;
break;
case SMU_GFXCLK:
case SMU_SCLK:
clock_limit = smu->smu_table.boot_values.gfxclk;
break;
case SMU_SOCCLK:
clock_limit = smu->smu_table.boot_values.socclk;
break;
default:
clock_limit = 0;
break;
}
/* clock in Mhz unit */
if (min)
*min = clock_limit / 100;
if (max)
*max = clock_limit / 100;
return 0;
}
clk_id = smu_clk_get_index(smu, clk_type); clk_id = smu_clk_get_index(smu, clk_type);
if (clk_id < 0) { if (clk_id < 0) {

View file

@@ -321,6 +321,34 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
{ {
int ret = 0; int ret = 0;
uint32_t mclk_mask, soc_mask; uint32_t mclk_mask, soc_mask;
uint32_t clock_limit;
if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
switch (clk_type) {
case SMU_MCLK:
case SMU_UCLK:
clock_limit = smu->smu_table.boot_values.uclk;
break;
case SMU_GFXCLK:
case SMU_SCLK:
clock_limit = smu->smu_table.boot_values.gfxclk;
break;
case SMU_SOCCLK:
clock_limit = smu->smu_table.boot_values.socclk;
break;
default:
clock_limit = 0;
break;
}
/* clock in Mhz unit */
if (min)
*min = clock_limit / 100;
if (max)
*max = clock_limit / 100;
return 0;
}
if (max) { if (max) {
ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK, ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,