drm/amd/display: fix initial bounding box values for dcn3.02

[Why]
Initial bounding box values are updated in dcn30_update_bw_bounding_box,
 but it uses dcn3_0_soc and dcn3_0_ip instead of dcn3_02_soc and
 dcn3_02_ip.

[How]
Add dcn302_update_bw_bounding_box and
 dcn302_get_optimal_dcfclk_fclk_for_uclk so that they use
 dcn3_02_soc and dcn3_02_ip.
Use sr_exit_time_us and sr_enter_plus_exit_time_us from dcn30 on
 dcn302 to fix flicker on eDP. Also use dram_clock_change_latency_us
 from dcn30.

Signed-off-by: Samson Tam <Samson.Tam@amd.com>
Reviewed-by: Joshua Aberback <Joshua.Aberback@amd.com>
Acked-by: Anson Jacob <Anson.Jacob@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
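
Note: the standalone sketch below mirrors the bandwidth math performed by the
new dcn302_get_optimal_dcfclk_fclk_for_uclk helper in the diff that follows.
The SoC parameters here are assumed placeholder values, not the real
dcn3_02_soc numbers; it only illustrates how a UCLK rate is turned into
optimal DCFCLK and FCLK targets.

#include <stdio.h>

/* Placeholder SoC parameters, chosen only for illustration. */
static const double num_chans = 2;
static const double dram_channel_width_bytes = 2;
static const double max_avg_dram_bw_use_normal_percent = 69.2;
static const double max_avg_sdp_bw_use_normal_percent = 60.0;
static const double return_bus_width_bytes = 64;
static const double fabric_datapath_to_dcn_data_return_bytes = 64;

static void example_optimal_dcfclk_fclk_for_uclk(double uclk_mts,
		double *optimal_dcfclk, double *optimal_fclk)
{
	/* DRAM bandwidth is derated by both the DRAM-side and SDP-side factors. */
	double bw_from_dram1 = uclk_mts * num_chans * dram_channel_width_bytes *
			(max_avg_dram_bw_use_normal_percent / 100);
	double bw_from_dram2 = uclk_mts * num_chans * dram_channel_width_bytes *
			(max_avg_sdp_bw_use_normal_percent / 100);
	double bw_from_dram = bw_from_dram1 < bw_from_dram2 ? bw_from_dram1 : bw_from_dram2;

	if (optimal_fclk)
		*optimal_fclk = bw_from_dram /
			(fabric_datapath_to_dcn_data_return_bytes *
			 (max_avg_sdp_bw_use_normal_percent / 100));
	if (optimal_dcfclk)
		*optimal_dcfclk = bw_from_dram /
			(return_bus_width_bytes * (max_avg_sdp_bw_use_normal_percent / 100));
}

int main(void)
{
	double dcfclk, fclk;

	/* The caller passes memclk_mhz * 16, i.e. a 1000 MHz memclk -> 16000 MT/s. */
	example_optimal_dcfclk_fclk_for_uclk(1000 * 16, &dcfclk, &fclk);
	printf("optimal dcfclk %.1f MHz, optimal fclk %.1f MHz\n", dcfclk, fclk);
	return 0;
}

With these placeholder values the program prints 1000 MHz for both clocks.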

drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c

@@ -2334,16 +2334,28 @@ bool dcn30_validate_bandwidth(struct dc *dc,
return out;
}
static noinline void get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
unsigned int *optimal_dcfclk,
unsigned int *optimal_fclk)
/*
* This must be noinline to ensure anything that deals with FP registers
* is contained within this call; previously our compiling with hard-float
* would result in fp instructions being emitted outside of the boundaries
* of the DC_FP_START/END macros, which makes sense as the compiler has no
* idea about what is wrapped and what is not
*
* This is largely just a workaround to avoid breakage introduced with 5.6,
* ideally all fp-using code should be moved into its own file, only that
* should be compiled with hard-float, and all code exported from there
* should be strictly wrapped with DC_FP_START/END
*/
static noinline void dcn30_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
unsigned int *optimal_dcfclk,
unsigned int *optimal_fclk)
{
double bw_from_dram, bw_from_dram1, bw_from_dram2;
bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
@@ -2402,7 +2414,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
// Calculate optimal dcfclk for each uclk
for (i = 0; i < num_uclk_states; i++) {
DC_FP_START();
get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
dcn30_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
&optimal_dcfclk_for_uclk[i], NULL);
DC_FP_END();
if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
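
Note: the comment in the hunk above describes the DC convention for using
floating point in kernel context. The sketch below is illustration only and
is not part of this patch; example_fp_helper and example_fp_caller are
made-up names, while noinline and the DC_FP_START()/DC_FP_END() macros are
the existing kernel/DC facilities the comment refers to, so this would only
compile inside a DC source file that already pulls in those definitions.

static noinline unsigned int example_fp_helper(unsigned int bw_mbps)
{
	/* All floating-point arithmetic stays inside this noinline helper. */
	double derated = bw_mbps * 0.6;

	return (unsigned int)(derated / 64.0);
}

static unsigned int example_fp_caller(unsigned int bw_mbps)
{
	unsigned int result;

	DC_FP_START();	/* enter protected FP context before any FP instruction runs */
	result = example_fp_helper(bw_mbps);
	DC_FP_END();	/* leave protected FP context */

	return result;
}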

drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c

@@ -54,6 +54,7 @@
#include "dce/dce_panel_cntl.h"
#include "dce/dmub_abm.h"
#include "dce/dmub_psr.h"
#include "clk_mgr.h"
#include "hw_sequencer_private.h"
#include "reg_helper.h"
@@ -163,8 +164,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
.num_states = 1,
.sr_exit_time_us = 5.20,
.sr_enter_plus_exit_time_us = 9.60,
.sr_exit_time_us = 12,
.sr_enter_plus_exit_time_us = 20,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -191,7 +192,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
.num_banks = 8,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
.dram_clock_change_latency_us = 350,
.dram_clock_change_latency_us = 404,
.dummy_pstate_latency_us = 5,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
@@ -1229,6 +1230,165 @@ static void dcn302_destroy_resource_pool(struct resource_pool **pool)
*pool = NULL;
}

static void dcn302_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
		unsigned int *optimal_dcfclk,
		unsigned int *optimal_fclk)
{
	double bw_from_dram, bw_from_dram1, bw_from_dram2;

	bw_from_dram1 = uclk_mts * dcn3_02_soc.num_chans *
		dcn3_02_soc.dram_channel_width_bytes * (dcn3_02_soc.max_avg_dram_bw_use_normal_percent / 100);
	bw_from_dram2 = uclk_mts * dcn3_02_soc.num_chans *
		dcn3_02_soc.dram_channel_width_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100);

	bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;

	if (optimal_fclk)
		*optimal_fclk = bw_from_dram /
			(dcn3_02_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100));

	if (optimal_dcfclk)
		*optimal_dcfclk = bw_from_dram /
			(dcn3_02_soc.return_bus_width_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100));
}

void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
	unsigned int i, j;
	unsigned int num_states = 0;

	unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
	unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
	unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
	unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};

	unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200};
	unsigned int num_dcfclk_sta_targets = 4;
	unsigned int num_uclk_states;

	if (dc->ctx->dc_bios->vram_info.num_chans)
		dcn3_02_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;

	if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
		dcn3_02_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;

	dcn3_02_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
	dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;

	if (bw_params->clk_table.entries[0].memclk_mhz) {
		int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;

		for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
			if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
				max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
			if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
				max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
			if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
				max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
			if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
				max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
		}
		if (!max_dcfclk_mhz)
			max_dcfclk_mhz = dcn3_02_soc.clock_limits[0].dcfclk_mhz;
		if (!max_dispclk_mhz)
			max_dispclk_mhz = dcn3_02_soc.clock_limits[0].dispclk_mhz;
		if (!max_dppclk_mhz)
			max_dppclk_mhz = dcn3_02_soc.clock_limits[0].dppclk_mhz;
		if (!max_phyclk_mhz)
			max_phyclk_mhz = dcn3_02_soc.clock_limits[0].phyclk_mhz;

		if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
			/* If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array */
			dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
			num_dcfclk_sta_targets++;
		} else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
			/* If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates */
			for (i = 0; i < num_dcfclk_sta_targets; i++) {
				if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
					dcfclk_sta_targets[i] = max_dcfclk_mhz;
					break;
				}
			}
			/* Update size of array since we "removed" duplicates */
			num_dcfclk_sta_targets = i + 1;
		}

		num_uclk_states = bw_params->clk_table.num_entries;

		/* Calculate optimal dcfclk for each uclk */
		for (i = 0; i < num_uclk_states; i++) {
			dcn302_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
					&optimal_dcfclk_for_uclk[i], NULL);
			if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
				optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
			}
		}

		/* Calculate optimal uclk for each dcfclk sta target */
		for (i = 0; i < num_dcfclk_sta_targets; i++) {
			for (j = 0; j < num_uclk_states; j++) {
				if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
					optimal_uclk_for_dcfclk_sta_targets[i] =
							bw_params->clk_table.entries[j].memclk_mhz * 16;
					break;
				}
			}
		}

		i = 0;
		j = 0;
		/* create the final dcfclk and uclk table */
		while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
			if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
				dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
				dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
			} else {
				if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
					dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
					dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
				} else {
					j = num_uclk_states;
				}
			}
		}

		while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
			dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
			dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
		}

		while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
				optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
			dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
			dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
		}

		dcn3_02_soc.num_states = num_states;
		for (i = 0; i < dcn3_02_soc.num_states; i++) {
			dcn3_02_soc.clock_limits[i].state = i;
			dcn3_02_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
			dcn3_02_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
			dcn3_02_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
			/* Fill all states with max values of all other clocks */
			dcn3_02_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
			dcn3_02_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
			dcn3_02_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
			dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[0].dtbclk_mhz;
			/* These clocks cannot come from bw_params, always fill from dcn3_02_soc[1] */
			/* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
			dcn3_02_soc.clock_limits[i].phyclk_d18_mhz = dcn3_02_soc.clock_limits[0].phyclk_d18_mhz;
			dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[0].socclk_mhz;
			dcn3_02_soc.clock_limits[i].dscclk_mhz = dcn3_02_soc.clock_limits[0].dscclk_mhz;
		}

		/* re-init DML with updated bb */
		dml_init_instance(&dc->dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30);
		if (dc->current_state)
			dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30);
	}
}

static struct resource_funcs dcn302_res_pool_funcs = {
.destroy = dcn302_destroy_resource_pool,
.link_enc_create = dcn302_link_encoder_create,
@@ -1245,7 +1405,7 @@ static struct resource_funcs dcn302_res_pool_funcs = {
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn30_update_bw_bounding_box,
.update_bw_bounding_box = dcn302_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
};
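
Note: as an aid to reading dcn302_update_bw_bounding_box above, the
standalone demo below runs the same STA-target/UCLK merge on made-up clock
values (the arrays and MAX_STATES stand in for the real inputs and
DC__VOLTAGE_STATES). It shows how the sorted DCFCLK STA targets and the
per-UCLK optimal DCFCLK values are interleaved into the final
(DCFCLK, DRAM speed) state table.

#include <stdio.h>

#define MAX_STATES 8	/* stand-in for DC__VOLTAGE_STATES */

int main(void)
{
	/*
	 * Made-up inputs: the four fixed DCFCLK STA targets plus an appended
	 * 1250 MHz max DCFCLK from the clock table, and three UCLK DPM levels
	 * with their computed optimal DCFCLK values.
	 */
	unsigned int sta[] = { 694, 875, 1000, 1200, 1250 };
	unsigned int opt_dcfclk_for_uclk[] = { 451, 902, 1300 };
	unsigned int uclk_mts[] = { 8000, 12000, 16000 };
	unsigned int num_sta = 5, num_uclk = 3, max_dcfclk = 1250;
	unsigned int opt_uclk_for_sta[5] = { 0 };
	unsigned int dcfclk[MAX_STATES], dram_mts[MAX_STATES];
	unsigned int i, j, n = 0;

	/* For each STA target, find the lowest UCLK whose optimal DCFCLK exceeds it. */
	for (i = 0; i < num_sta; i++)
		for (j = 0; j < num_uclk; j++)
			if (sta[i] < opt_dcfclk_for_uclk[j]) {
				opt_uclk_for_sta[i] = uclk_mts[j];
				break;
			}

	/* Merge both sorted lists into the final (DCFCLK, DRAM speed) state table. */
	i = 0;
	j = 0;
	while (i < num_sta && j < num_uclk && n < MAX_STATES) {
		if (sta[i] < opt_dcfclk_for_uclk[j]) {
			dcfclk[n] = sta[i];
			dram_mts[n++] = opt_uclk_for_sta[i++];
		} else if (opt_dcfclk_for_uclk[j] <= max_dcfclk) {
			dcfclk[n] = opt_dcfclk_for_uclk[j];
			dram_mts[n++] = uclk_mts[j++];
		} else {
			j = num_uclk;
		}
	}
	while (i < num_sta && n < MAX_STATES) {
		dcfclk[n] = sta[i];
		dram_mts[n++] = opt_uclk_for_sta[i++];
	}
	while (j < num_uclk && n < MAX_STATES && opt_dcfclk_for_uclk[j] <= max_dcfclk) {
		dcfclk[n] = opt_dcfclk_for_uclk[j];
		dram_mts[n++] = uclk_mts[j++];
	}

	for (i = 0; i < n; i++)
		printf("state %u: dcfclk %u MHz, dram %u MT/s\n", i, dcfclk[i], dram_mts[i]);
	return 0;
}

With these placeholder numbers the demo prints seven states, from
451 MHz / 8000 MT/s up to 1250 MHz / 16000 MT/s.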

drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h

@@ -30,4 +30,6 @@
struct resource_pool *dcn302_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc);
void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
#endif /* _DCN302_RESOURCE_H_ */
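
Note: the hook declared in the header above is not called directly; it is
reached through the pool's resource_funcs table, as the change to
dcn302_res_pool_funcs shows. A caller that wants to refresh the bounding box
after the clock table changes would do roughly the following;
example_refresh_bw_bounding_box is a made-up wrapper for illustration, not a
function from this patch.

static void example_refresh_bw_bounding_box(struct dc *dc,
		struct clk_bw_params *bw_params)
{
	/* Dispatch through the per-ASIC resource_funcs table. */
	if (dc->res_pool->funcs->update_bw_bounding_box)
		dc->res_pool->funcs->update_bw_bounding_box(dc, bw_params);
}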