drm/amd: Fix logic error in sienna_cichlid_update_pcie_parameters()

While aligning the SMU11 implementation with SMU13, it was assumed
that `dpm_context->dpm_tables.pcie_table` is populated during dpm table
initialization, as it is on SMU13, but on SMU11 it isn't. Clamping the
caps against that empty table forces the link down to the lowest PCIe
speed and width.

So restore some of the original logic and instead just check
amdgpu_device_pcie_dynamic_switching_supported() to decide whether to
hardcode the values, erring on the side of performance.
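
As an illustration, here is a minimal standalone sketch of the clamping
this change performs (the pptable values are made up for the example;
MIN/MAX behave like the driver's macros, and the names are hypothetical,
not the exact kernel code):

    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
            /* Hypothetical pptable: min Gen2 (index 1), max Gen4 (index 3);
             * the platform caps the link at Gen3 (index 2). */
            uint8_t pcie_gen_speed[2] = { 1, 3 };
            uint32_t pcie_gen_cap = 2;

            uint32_t min_gen = MAX(0, pcie_gen_speed[0]);            /* 1 */
            uint32_t max_gen = MIN(pcie_gen_cap, pcie_gen_speed[1]); /* 2 */

            /* Never let the floor exceed the ceiling. */
            min_gen = min_gen > max_gen ? max_gen : min_gen;         /* 1 */

            /* Without dynamic switching, level 0 is pinned to max_gen
             * (performance); with it, level 0 = min_gen, level 1 = max_gen. */
            printf("min_gen=%u max_gen=%u\n",
                   (unsigned)min_gen, (unsigned)max_gen);
            return 0;
    }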

Cc: stable@vger.kernel.org # 6.1+
Reported-and-tested-by: Umio Yasuno <coelacanth_dream@protonmail.com>
Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/1447#note_2101382
Fixes: e701156ccc ("drm/amd: Align SMU11 SMU_MSG_OverridePcieParameters implementation with SMU13")
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

@@ -2089,36 +2089,41 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
 	return ret;
 }
 
+#define MAX(a, b)	((a) > (b) ? (a) : (b))
+
 static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
 					 uint32_t pcie_gen_cap,
 					 uint32_t pcie_width_cap)
 {
 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
 	struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
-	u32 smu_pcie_arg;
+	uint8_t *table_member1, *table_member2;
+	uint32_t min_gen_speed, max_gen_speed;
+	uint32_t min_lane_width, max_lane_width;
+	uint32_t smu_pcie_arg;
 	int ret, i;
 
-	/* PCIE gen speed and lane width override */
+	GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
+	GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
+
+	min_gen_speed = MAX(0, table_member1[0]);
+	max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
+	min_gen_speed = min_gen_speed > max_gen_speed ?
+			max_gen_speed : min_gen_speed;
+	min_lane_width = MAX(1, table_member2[0]);
+	max_lane_width = MIN(pcie_width_cap, table_member2[1]);
+	min_lane_width = min_lane_width > max_lane_width ?
+			max_lane_width : min_lane_width;
+
 	if (!amdgpu_device_pcie_dynamic_switching_supported()) {
-		if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
-			pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
-
-		if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
-			pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
-
-		/* Force all levels to use the same settings */
-		for (i = 0; i < NUM_LINK_LEVELS; i++) {
-			pcie_table->pcie_gen[i] = pcie_gen_cap;
-			pcie_table->pcie_lane[i] = pcie_width_cap;
-		}
+		pcie_table->pcie_gen[0] = max_gen_speed;
+		pcie_table->pcie_lane[0] = max_lane_width;
 	} else {
-		for (i = 0; i < NUM_LINK_LEVELS; i++) {
-			if (pcie_table->pcie_gen[i] > pcie_gen_cap)
-				pcie_table->pcie_gen[i] = pcie_gen_cap;
-
-			if (pcie_table->pcie_lane[i] > pcie_width_cap)
-				pcie_table->pcie_lane[i] = pcie_width_cap;
-		}
+		pcie_table->pcie_gen[0] = min_gen_speed;
+		pcie_table->pcie_lane[0] = min_lane_width;
 	}
+	pcie_table->pcie_gen[1] = max_gen_speed;
+	pcie_table->pcie_lane[1] = max_lane_width;
 
 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
 		smu_pcie_arg = (i << 16 |