Merge tag 'amd-drm-next-6.3-2023-02-03' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.3-2023-02-03:

amdgpu:
- PCI hotplug fixes
- Allow S0ix without BIOS support
- GC11 fixes
- DCN 3.2.x fixes
- Enable freesync over PCon
- DSC fix
- DCN 3.1.4 fixes
- NBIO 4.3 fix
- Misc code cleanups and spelling fixes
- Temporarily disable S/G on DCN 2.1 and 3.1.2/3
- Fix and re-enable S/G on DCN 3.1.4
- Re-enable the AGP aperture on GMC 11.x

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230203220316.8580-1-alexander.deucher@amd.com
commit 78e9800129
Committed by Dave Airlie, 2023-02-09 14:47:15 +10:00
44 changed files with 435 additions and 176 deletions

@@ -1079,20 +1079,16 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
* S0ix even though the system is suspending to idle, so return false
* in that case.
*/
if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
dev_warn_once(adev->dev,
"Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
"To use suspend-to-idle change the sleep mode in BIOS setup.\n");
return false;
}
#if !IS_ENABLED(CONFIG_AMD_PMC)
dev_warn_once(adev->dev,
"Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
return false;
#else
return true;
#endif /* CONFIG_AMD_PMC */
return true;
}
#endif /* CONFIG_SUSPEND */
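
The hunk above is the "Allow S0ix without BIOS support" change from the changelog: the FADT low-power-S0 check no longer returns false, it only warns. A condensed sketch of just that check (not the verbatim patch):

    if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
        dev_warn_once(adev->dev,
                      "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
                      "To use suspend-to-idle change the sleep mode in BIOS setup.\n");
    /* ... S0ix is no longer vetoed here ... */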

@@ -4031,7 +4031,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_gart_dummy_page_fini(adev);
amdgpu_device_unmap_mmio(adev);
if (drm_dev_is_unplugged(adev_to_drm(adev)))
amdgpu_device_unmap_mmio(adev);
}

@@ -2227,6 +2227,8 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
drm_dev_unplug(dev);
if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
pm_runtime_get_sync(dev->dev);
pm_runtime_forbid(dev->dev);
@@ -2266,8 +2268,6 @@ amdgpu_pci_remove(struct pci_dev *pdev)
amdgpu_driver_unload_kms(dev);
drm_dev_unplug(dev);
/*
* Flush any in flight DMA operations from device.
* Clear the Bus Master Enable bit and then wait on the PCIe Device

@@ -606,12 +606,21 @@ psp_cmd_submit_buf(struct psp_context *psp,
int timeout = 20000;
bool ras_intr = false;
bool skip_unsupport = false;
bool dev_entered;
if (psp->adev->no_hw_access)
return 0;
if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
return 0;
dev_entered = drm_dev_enter(adev_to_drm(psp->adev), &idx);
/*
 * We allow sending the PSP messages LOAD_ASD and UNLOAD_TA without holding
 * the drm_dev_enter lock during driver unload, because drm_dev_unplug must
 * be called at the beginning of the unload sequence: it is crucial that
 * userspace can no longer access the device instance.
 */
if (!dev_entered)
WARN_ON(psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_LOAD_ASD &&
psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_UNLOAD_TA);
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
@@ -676,7 +685,8 @@ psp_cmd_submit_buf(struct psp_context *psp,
}
exit:
drm_dev_exit(idx);
if (dev_entered)
drm_dev_exit(idx);
return ret;
}
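
The dev_entered bookkeeping above follows the standard DRM hotplug guard; a minimal usage sketch of the underlying drm_dev_enter()/drm_dev_exit() pattern (illustrative, with drm standing for the adev's drm_device):

    int idx;

    if (!drm_dev_enter(drm, &idx))
        return 0;               /* device already unplugged */
    /* ... hardware access is safe in this section ... */
    drm_dev_exit(idx);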

@@ -983,11 +983,9 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
if (offset == reg_access_ctrl->grbm_cntl) {
/* if the target reg offset is grbm_cntl, write to scratch_reg2 */
writel(v, scratch_reg2);
writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
} else if (offset == reg_access_ctrl->grbm_idx) {
/* if the target reg offset is grbm_idx, write to scratch_reg3 */
writel(v, scratch_reg3);
writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
} else {
/*
* SCRATCH_REG0 = read/write value

@@ -94,7 +94,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
}
/* Exit boradcast mode */
/* Exit broadcast mode */
adev->df.funcs->enable_broadcast_mode(adev, false);
}

@@ -754,8 +754,8 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd,
* zero here */
WARN_ON(simd != 0);
/* type 2 wave data */
dst[(*no_fields)++] = 2;
/* type 3 wave data */
dst[(*no_fields)++] = 3;
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);

@@ -151,10 +151,11 @@ static void gfxhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
/* Disable AGP. */
/* Program the AGP BAR */
WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BASE, 0);
WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, 0);
WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, 0x00FFFFFF);
WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
/* Program the system aperture low logical page number. */
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,

@@ -673,6 +673,7 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
amdgpu_gmc_gart_location(adev, mc);
amdgpu_gmc_agp_location(adev, mc);
/* base offset of vram pages */
if (amdgpu_sriov_vf(adev))

@@ -177,10 +177,11 @@ static void mmhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev)
* these regs, and they will be programmed at the host,
* so skip programming these regs.
*/
/* Disable AGP. */
/* Program the AGP BAR */
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->gmc.vram_start >> 18);
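
The shift counts above encode register granularity: AGP_BOT/AGP_TOP take the address in 16 MiB units (>> 24) while the system aperture registers take 256 KiB units (>> 18), assuming the usual GMC register layout. A worked example with a hypothetical 256 MiB AGP window:

    uint64_t agp_start = 0x20000000ULL;          /* hypothetical values */
    uint64_t agp_end   = 0x2fffffffULL;

    /* AGP_BOT = 0x20, AGP_TOP = 0x2f: 16 MiB-granular bounds */
    WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, agp_start >> 24);
    WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, agp_end >> 24);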

@@ -162,10 +162,10 @@ static void mmhub_v3_0_2_init_system_aperture_regs(struct amdgpu_device *adev)
uint64_t value;
uint32_t tmp;
/* Disable AGP. */
/* Program the AGP BAR */
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
if (!amdgpu_sriov_vf(adev)) {
/*

@@ -337,7 +337,13 @@ const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg = {
static void nbio_v4_3_init_registers(struct amdgpu_device *adev)
{
return;
if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(4, 3, 0)) {
uint32_t data;
data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2);
data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
}
}
static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)

@@ -676,7 +676,8 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_REPEATER_FGCG |
AMD_CG_SUPPORT_GFX_MGCG;
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_HDP_SD;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;

@@ -106,7 +106,6 @@
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
@@ -1186,24 +1185,38 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
memset(pa_config, 0, sizeof(*pa_config));
logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
/*
* Raven2 has a HW issue that it is unable to use the vram which
* is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
* workaround that increase system aperture high address (add 1)
* to get rid of the VM fault and hardware hang.
*/
logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
else
logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
agp_base = 0;
agp_bot = adev->gmc.agp_start >> 24;
agp_top = adev->gmc.agp_end >> 24;
/* AGP aperture is disabled */
if (agp_bot == agp_top) {
logical_addr_low = adev->gmc.vram_start >> 18;
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
/*
* Raven2 has a HW issue that it is unable to use the vram which
* is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
* workaround that increases the system aperture high address (add 1)
* to get rid of the VM fault and hardware hang.
*/
logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
else
logical_addr_high = adev->gmc.vram_end >> 18;
} else {
logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
/*
* Raven2 has a HW issue that it is unable to use the vram which
* is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
* workaround that increases the system aperture high address (add 1)
* to get rid of the VM fault and hardware hang.
*/
logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
else
logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
}
pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
@@ -1302,10 +1315,28 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
dc_link_dp_allow_hpd_rx_irq(dc_link)) {
dc_link_dp_handle_link_loss(dc_link);
/* offload_work->data comes from handle_hpd_rx_irq->
 * schedule_hpd_rx_offload_work and is the deferred handler for an
 * HPD short pulse. By the time we get here the link status may have
 * changed, so read the latest link status from the DPCD registers;
 * if the link is still good, skip running link training again.
 */
union hpd_irq_data irq_data;
memset(&irq_data, 0, sizeof(irq_data));
/* Clear the flag before dc_link_dp_handle_link_loss so that a new
 * link-loss handling request can be queued if the link is lost again
 * at the end of dc_link_dp_handle_link_loss.
 */
spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
offload_work->offload_wq->is_handling_link_loss = false;
spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
if ((dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
dc_link_check_link_loss_status(dc_link, &irq_data))
dc_link_dp_handle_link_loss(dc_link);
}
mutex_unlock(&adev->dm.dc_lock);
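
Reduced to its pattern, the deferred handler above does "release the in-progress flag, then re-check before acting", so a link loss that occurs while the handler runs can queue fresh work (a sketch with shortened names, not the verbatim code):

    spin_lock_irqsave(&wq->offload_lock, flags);
    wq->is_handling_link_loss = false;
    spin_unlock_irqrestore(&wq->offload_lock, flags);

    if (dp_read_hpd_rx_irq_data(link, &irq_data) == DC_OK &&
        dc_link_check_link_loss_status(link, &irq_data))
        dc_link_dp_handle_link_loss(link);
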
@@ -1535,10 +1566,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
(adev->apu_flags & AMD_APU_IS_PICASSO))
init_data.flags.gpu_vm_support = true;
break;
case IP_VERSION(2, 1, 0):
case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 1, 6):
init_data.flags.gpu_vm_support = true;
break;
@@ -1623,6 +1652,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
adev->dm.dc->debug.ignore_cable_id = true;
if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
DRM_INFO("DP-HDMI FRL PCON supported\n");
r = dm_dmub_hw_init(adev);
if (r) {
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -3235,7 +3267,7 @@ static void handle_hpd_rx_irq(void *param)
union hpd_irq_data hpd_irq_data;
bool link_loss = false;
bool has_left_work = false;
int idx = aconnector->base.index;
int idx = dc_link->link_index;
struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
@@ -3377,7 +3409,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
(void *) aconnector);
if (adev->dm.hpd_rx_offload_wq)
adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
aconnector;
}
}
@@ -4572,6 +4604,17 @@ static int dm_init_microcode(struct amdgpu_device *adev)
static int dm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_mode_info *mode_info = &adev->mode_info;
struct atom_context *ctx = mode_info->atom_context;
int index = GetIndexIntoMasterTable(DATA, Object_Header);
u16 data_offset;
/* if there is no object header, skip DM */
if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
dev_info(adev->dev, "No object header, skipping DM\n");
return -ENOENT;
}
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -6334,7 +6377,6 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
dc_plane_state->plane_size.surface_size.width = stream->src.width;
dc_plane_state->plane_size.chroma_size.height = stream->src.height;
dc_plane_state->plane_size.chroma_size.width = stream->src.width;
dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
dc_plane_state->rotation = ROTATION_ANGLE_0;
@@ -7113,6 +7155,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->base.dpms = DRM_MODE_DPMS_OFF;
aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
aconnector->audio_inst = -1;
aconnector->pack_sdp_v1_3 = false;
aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
mutex_init(&aconnector->hpd_lock);
/*
@@ -7603,6 +7648,8 @@ static void update_freesync_state_on_stream(
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
unsigned long flags;
bool pack_sdp_v1_3 = false;
struct amdgpu_dm_connector *aconn;
enum vrr_packet_type packet_type = PACKET_TYPE_VRR;
if (!new_stream)
return;
@@ -7638,11 +7685,27 @@ static void update_freesync_state_on_stream(
}
}
aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
if (aconn && aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
if (aconn->vsdb_info.amd_vsdb_version == 1)
packet_type = PACKET_TYPE_FS_V1;
else if (aconn->vsdb_info.amd_vsdb_version == 2)
packet_type = PACKET_TYPE_FS_V2;
else if (aconn->vsdb_info.amd_vsdb_version == 3)
packet_type = PACKET_TYPE_FS_V3;
mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL,
&new_stream->adaptive_sync_infopacket);
}
mod_freesync_build_vrr_infopacket(
dm->freesync_module,
new_stream,
&vrr_params,
PACKET_TYPE_VRR,
packet_type,
TRANSFER_FUNC_UNKNOWN,
&vrr_infopacket,
pack_sdp_v1_3);
@@ -10311,6 +10374,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
bool freesync_capable = false;
enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
if (!connector->state) {
DRM_ERROR("%s - Connector has no state", __func__);
@@ -10403,6 +10467,26 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
}
}
as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);
if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
amdgpu_dm_connector->pack_sdp_v1_3 = true;
amdgpu_dm_connector->as_type = as_type;
amdgpu_dm_connector->vsdb_info = vsdb_info;
amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
freesync_capable = true;
connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
}
}
update:
if (dm_con_state)
dm_con_state->freesync_capable = freesync_capable;

@@ -59,6 +59,7 @@
#include "irq_types.h"
#include "signal_types.h"
#include "amdgpu_dm_crc.h"
#include "mod_info_packet.h"
struct aux_payload;
struct set_config_cmd_payload;
enum aux_return_code_type;
@@ -577,6 +578,36 @@ enum mst_progress_status {
MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3),
};
/**
* struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
*
* AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
* struct is useful to keep track of the display-specific information about
* FreeSync.
*/
struct amdgpu_hdmi_vsdb_info {
/**
* @amd_vsdb_version: Vendor Specific Data Block Version, should be
* used to determine which Vendor Specific InfoFrame (VSIF) to send.
*/
unsigned int amd_vsdb_version;
/**
* @freesync_supported: FreeSync Supported.
*/
bool freesync_supported;
/**
* @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
*/
unsigned int min_refresh_rate_hz;
/**
* @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz
*/
unsigned int max_refresh_rate_hz;
};
struct amdgpu_dm_connector {
struct drm_connector base;
@@ -649,6 +680,11 @@ struct amdgpu_dm_connector {
/* Automated testing */
bool timing_changed;
struct dc_crtc_timing *timing_requested;
/* Adaptive Sync */
bool pack_sdp_v1_3;
enum adaptive_sync_type as_type;
struct amdgpu_hdmi_vsdb_info vsdb_info;
};
static inline void amdgpu_dm_set_mst_status(uint8_t *status,
@@ -719,37 +755,6 @@ struct dm_connector_state {
uint64_t pbn;
};
/**
* struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
*
* AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
* struct is useful to keep track of the display-specific information about
* FreeSync.
*/
struct amdgpu_hdmi_vsdb_info {
/**
* @amd_vsdb_version: Vendor Specific Data Block Version, should be
* used to determine which Vendor Specific InfoFrame (VSIF) to send.
*/
unsigned int amd_vsdb_version;
/**
* @freesync_supported: FreeSync Supported.
*/
bool freesync_supported;
/**
* @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
*/
unsigned int min_refresh_rate_hz;
/**
* @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz
*/
unsigned int max_refresh_rate_hz;
};
#define to_dm_connector_state(x)\
container_of((x), struct dm_connector_state, base)

@@ -403,6 +403,7 @@ bool dm_helpers_dp_mst_start_top_mgr(
bool boot)
{
struct amdgpu_dm_connector *aconnector = link->priv;
int ret;
if (!aconnector) {
DRM_ERROR("Failed to find connector for link!");
@@ -418,7 +419,16 @@ bool dm_helpers_dp_mst_start_top_mgr(
DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
aconnector, aconnector->base.base.id);
return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
if (ret < 0) {
DRM_ERROR("DM_MST: Failed to set the device into MST mode!");
return false;
}
DRM_INFO("DM_MST: DP%x, %d-lane link detected\n", aconnector->mst_mgr.dpcd[0],
aconnector->mst_mgr.dpcd[2] & DP_MAX_LANE_COUNT_MASK);
return true;
}
bool dm_helpers_dp_mst_stop_top_mgr(
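
A worked example of the new DRM_INFO line, assuming a typical sink: with mst_mgr.dpcd[0] == 0x14 (DPCD revision 1.4) and mst_mgr.dpcd[2] == 0x84, masking with DP_MAX_LANE_COUNT_MASK (0x1f) yields 4, so the log reads:

    DM_MST: DP14, 4-lane link detected
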
@@ -1133,3 +1143,36 @@ void dm_helpers_dp_mst_update_branch_bandwidth(
// TODO
}
static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id)
{
bool ret_val = false;
switch (branch_dev_id) {
case DP_BRANCH_DEVICE_ID_0060AD:
ret_val = true;
break;
default:
break;
}
return ret_val;
}
enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link)
{
struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
switch (dpcd_caps->dongle_type) {
case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
if (dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT == true &&
dpcd_caps->allow_invalid_MSA_timing_param == true &&
dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id))
as_type = FREESYNC_TYPE_PCON_IN_WHITELIST;
break;
default:
break;
}
return as_type;
}
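
A usage sketch of the new helper (illustrative; the connector fields come from the amdgpu_dm changes elsewhere in this series): the DM queries the type while parsing FreeSync capabilities and only enables SDP packing for whitelisted DP-to-HDMI PCONs:

    enum adaptive_sync_type t =
        dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);

    if (t == FREESYNC_TYPE_PCON_IN_WHITELIST)
        amdgpu_dm_connector->pack_sdp_v1_3 = true;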

@@ -177,6 +177,9 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
if (dc_link->sink_count)
dc_link_remove_remote_sink(dc_link, dc_sink);
DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n",
dc_sink, dc_link->sink_count);
dc_sink_release(dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
@@ -308,6 +311,9 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
return 0;
}
DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n",
dc_sink, aconnector->dc_link->sink_count);
dc_sink->priv = aconnector;
aconnector->dc_sink = dc_sink;
}
@@ -341,6 +347,9 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
return 0;
}
DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n",
dc_sink, aconnector->dc_link->sink_count);
dc_sink->priv = aconnector;
/* dc_link_add_remote_sink returns a new reference */
aconnector->dc_sink = dc_sink;
@@ -458,6 +467,9 @@
if (aconnector->dc_link->sink_count)
dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n",
aconnector->dc_link, aconnector->dc_link->sink_count);
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;

@@ -572,10 +572,11 @@ static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *cl
{
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
uint32_t max_pstate = 0, max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
uint32_t max_pstate = 0, max_fclk = 0, max_dispclk = 0, max_dppclk = 0;
uint32_t min_pstate = 0, min_fclk = clock_table->DfPstateTable[0].FClk;
int i;
/* Find highest valid fclk pstate */
/* Find highest and lowest valid fclk pstate */
for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) {
if (is_valid_clock_value(clock_table->DfPstateTable[i].FClk) &&
clock_table->DfPstateTable[i].FClk > max_fclk) {
@@ -584,6 +585,14 @@ static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *cl
}
}
for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) {
if (is_valid_clock_value(clock_table->DfPstateTable[i].FClk) &&
clock_table->DfPstateTable[i].FClk < min_fclk) {
min_fclk = clock_table->DfPstateTable[i].FClk;
min_pstate = i;
}
}
/* We expect the table to contain at least one valid fclk entry. */
ASSERT(is_valid_clock_value(max_fclk));
@@ -599,15 +608,17 @@ static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *cl
/* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
uint32_t min_fclk = clock_table->DfPstateTable[0].FClk;
uint32_t max_level_fclk = clock_table->DfPstateTable[0].FClk;
uint32_t max_level_pstate = 0;
int j;
/* Look for the maximum supported FCLK for the current voltage. */
for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) {
if (is_valid_clock_value(clock_table->DfPstateTable[j].FClk) &&
clock_table->DfPstateTable[j].FClk < min_fclk &&
clock_table->DfPstateTable[j].FClk > max_level_fclk &&
clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]) {
min_fclk = clock_table->DfPstateTable[j].FClk;
min_pstate = j;
max_level_fclk = clock_table->DfPstateTable[j].FClk;
max_level_pstate = j;
}
}
@@ -621,15 +632,15 @@ static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *cl
bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
/* Now update clocks we do read */
bw_params->clk_table.entries[i].fclk_mhz = min_fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage;
bw_params->clk_table.entries[i].fclk_mhz = max_level_fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_level_pstate].MemClk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_level_pstate].Voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
clock_table->DfPstateTable[min_pstate].WckRatio);
clock_table->DfPstateTable[max_level_pstate].WckRatio);
}
/* Make sure to include at least one entry at highest pstate */

@@ -87,16 +87,6 @@ static int dcn315_get_active_display_cnt_wa(
return display_count;
}
bool should_disable_otg(struct pipe_ctx *pipe)
{
bool ret = true;
if (pipe->stream->link->link_enc && pipe->stream->link->link_enc->funcs->is_dig_enabled &&
pipe->stream->link->link_enc->funcs->is_dig_enabled(pipe->stream->link->link_enc))
ret = false;
return ret;
}
static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
@@ -108,16 +98,12 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
dc_is_virtual_signal(pipe->stream->signal))) {
/* This w/a should not trigger when we have a dig active */
if (should_disable_otg(pipe)) {
if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
dc_is_virtual_signal(pipe->stream->signal))) {
if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
}

@@ -46,7 +46,6 @@
#include "dpcd_defs.h"
#include "dmcu.h"
#include "dsc.h"
#include "opp.h"
#include "hw/clk_mgr.h"
#include "dce/dmub_psr.h"
#include "dmub/dmub_srv.h"

@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
#define DC_VER "3.2.220"
#define DC_VER "3.2.221"
#define MAX_SURFACES 3
#define MAX_PLANES 6

@@ -797,6 +797,29 @@ enum dc_timing_3d_format {
TIMING_3D_FORMAT_MAX,
};
#define DC_DSC_QP_SET_SIZE 15
#define DC_DSC_RC_BUF_THRESH_SIZE 14
struct dc_dsc_rc_params_override {
int32_t rc_model_size;
int32_t rc_buf_thresh[DC_DSC_RC_BUF_THRESH_SIZE];
int32_t rc_minqp[DC_DSC_QP_SET_SIZE];
int32_t rc_maxqp[DC_DSC_QP_SET_SIZE];
int32_t rc_offset[DC_DSC_QP_SET_SIZE];
int32_t rc_tgt_offset_hi;
int32_t rc_tgt_offset_lo;
int32_t rc_edge_factor;
int32_t rc_quant_incr_limit0;
int32_t rc_quant_incr_limit1;
int32_t initial_fullness_offset;
int32_t initial_delay;
int32_t flatness_min_qp;
int32_t flatness_max_qp;
int32_t flatness_det_thresh;
};
struct dc_dsc_config {
uint32_t num_slices_h; /* Number of DSC slices - horizontal */
uint32_t num_slices_v; /* Number of DSC slices - vertical */
@@ -811,6 +834,7 @@ struct dc_dsc_config {
#endif
bool is_dp; /* indicate if DSC is applied based on DP's capability */
uint32_t mst_pbn; /* pbn of display on dsc mst hub */
const struct dc_dsc_rc_params_override *rc_params_ovrd; /* DM owned memory. If not NULL, apply custom dsc rc params */
};
/**

@@ -433,6 +433,9 @@ void dc_link_dp_handle_link_loss(struct dc_link *link);
bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link);
bool dc_link_check_link_loss_status(struct dc_link *link,
union hpd_irq_data *hpd_irq_dpcd_data);
enum dc_status dp_read_hpd_rx_irq_data(
struct dc_link *link,
union hpd_irq_data *irq_data);
struct dc_sink_init_data;
struct dc_sink *dc_link_add_remote_sink(
@@ -627,4 +630,31 @@ struct fixed31_32 calculate_sst_avg_time_slots_per_mtp(
void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable);
void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode);
/*
* USB4 DPIA BW ALLOCATION PUBLIC FUNCTIONS
*/
/*
* Send a request from DP-Tx requesting to allocate BW remotely after
* allocating it locally. This will get processed by CM and a CB function
* will be called.
*
* @link: pointer to the dc_link struct instance
* @req_bw: The requested bandwidth in Kbyte to be allocated
*
* return: none
*/
void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw);
/*
* CB function for when the status of the Req above is complete. We will
* find out the result of allocating on CM and update structs accordingly
*
* @link: pointer to the dc_link struct instance
* @bw: Allocated or Estimated BW depending on the result
* @result: Response type
*
* return: none
*/
void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t result);
#endif /* DC_LINK_H_ */

@@ -2899,7 +2899,7 @@ void dcn10_blank_pixel_data(
dc->hwss.set_pipe(pipe_ctx);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
}
} else if (blank) {
} else {
dc->hwss.set_abm_immediate_disable(pipe_ctx);
if (stream_res->tg->funcs->set_blank) {
stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);

@@ -28,6 +28,7 @@
#include "reg_helper.h"
#include "dcn20_dsc.h"
#include "dsc/dscc_types.h"
#include "dsc/rc_calc.h"
static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps);
static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,
@@ -344,10 +345,38 @@ static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_co
}
}
static void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_params_override *override)
{
uint8_t i;
rc->rc_model_size = override->rc_model_size;
for (i = 0; i < DC_DSC_RC_BUF_THRESH_SIZE; i++)
rc->rc_buf_thresh[i] = override->rc_buf_thresh[i];
for (i = 0; i < DC_DSC_QP_SET_SIZE; i++) {
rc->qp_min[i] = override->rc_minqp[i];
rc->qp_max[i] = override->rc_maxqp[i];
rc->ofs[i] = override->rc_offset[i];
}
rc->rc_tgt_offset_hi = override->rc_tgt_offset_hi;
rc->rc_tgt_offset_lo = override->rc_tgt_offset_lo;
rc->rc_edge_factor = override->rc_edge_factor;
rc->rc_quant_incr_limit0 = override->rc_quant_incr_limit0;
rc->rc_quant_incr_limit1 = override->rc_quant_incr_limit1;
rc->initial_fullness_offset = override->initial_fullness_offset;
rc->initial_xmit_delay = override->initial_delay;
rc->flatness_min_qp = override->flatness_min_qp;
rc->flatness_max_qp = override->flatness_max_qp;
rc->flatness_det_thresh = override->flatness_det_thresh;
}
static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,
struct dsc_optc_config *dsc_optc_cfg)
{
struct dsc_parameters dsc_params;
struct rc_params rc;
/* Validate input parameters */
ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_h);
@@ -412,7 +441,12 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_
dsc_reg_vals->pps.native_420 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420);
dsc_reg_vals->pps.simple_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422);
if (dscc_compute_dsc_parameters(&dsc_reg_vals->pps, &dsc_params)) {
calc_rc_params(&rc, &dsc_reg_vals->pps);
if (dsc_cfg->dc_dsc_cfg.rc_params_ovrd)
dsc_override_rc_params(&rc, dsc_cfg->dc_dsc_cfg.rc_params_ovrd);
if (dscc_compute_dsc_parameters(&dsc_reg_vals->pps, &rc, &dsc_params)) {
dm_output_to_console("%s: DSC config failed\n", __func__);
return false;
}
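
A hypothetical caller-side sketch of the new override hook (the values are illustrative only; dsc_override_rc_params copies every field, so a real table must be populated in full): a DM that owns a tuned RC table attaches it through dc_dsc_config before dsc_prepare_config() runs:

    static const struct dc_dsc_rc_params_override tuned_rc = {
        .rc_model_size    = 8192,
        .rc_tgt_offset_hi = 3,
        .rc_tgt_offset_lo = 3,
        /* ... rc_minqp/rc_maxqp/rc_offset tables etc. filled in full ... */
    };

    dsc_cfg.dc_dsc_cfg.rc_params_ovrd = &tuned_rc;  /* DM-owned memory */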

@@ -1777,6 +1777,15 @@ static void dcn20_program_pipe(
&pipe_ctx->stream->bit_depth_params,
&pipe_ctx->stream->clamping);
}
/* Set ABM pipe after other pipe configurations done */
if (pipe_ctx->plane_state->visible) {
if (pipe_ctx->stream_res.abm) {
dc->hwss.set_pipe(pipe_ctx);
pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
pipe_ctx->stream->abm_level);
}
}
}
void dcn20_program_front_end_for_ctx(

@@ -2225,14 +2225,10 @@ enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_stat
enum surface_pixel_format surf_pix_format = plane_state->format;
unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
enum swizzle_mode_values swizzle = DC_SW_LINEAR;
plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_S;
if (bpp == 64)
swizzle = DC_SW_64KB_D;
else
swizzle = DC_SW_64KB_S;
plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_D;
plane_state->tiling_info.gfx9.swizzle = swizzle;
return DC_OK;
}

@@ -1393,15 +1393,13 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)
static enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
enum dc_status result = DC_OK;
if (plane_state->ctx->dc->debug.disable_dcc == DCC_ENABLE) {
plane_state->dcc.enable = 1;
/* align to our worst case block width */
plane_state->dcc.meta_pitch = ((plane_state->src_rect.width + 1023) / 1024) * 1024;
}
result = dcn20_patch_unknown_plane_state(plane_state);
return result;
return dcn20_patch_unknown_plane_state(plane_state);
}
static const struct resource_funcs dcn21_res_pool_funcs = {

@@ -874,8 +874,9 @@ static const struct dc_plane_cap plane_cap = {
},
// 6:1 downscaling ratio: 1000/6 = 166.666
// 4:1 downscaling ratio for ARGB888 to prevent underflow during P010 playback: 1000/4 = 250
.max_downscale_factor = {
.argb8888 = 167,
.argb8888 = 250,
.nv12 = 167,
.fp16 = 167
},
@@ -1763,7 +1764,7 @@ static bool dcn314_resource_construct(
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
dc->caps.max_downscale_ratio = 400;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100;
dc->caps.max_cursor_size = 256;

@@ -94,7 +94,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations,
.does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall,
.does_plane_fit_in_mall = NULL,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,

@@ -1714,7 +1714,6 @@ static bool dcn321_resource_construct(
dc->caps.mall_size_per_mem_channel * 1024 * 1024;
dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_drr_max_vblank_margin_us = 40;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;

@@ -199,6 +199,7 @@ int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
const struct dc_link *link,
struct set_config_cmd_payload *payload,
enum set_config_status *operation_result);
enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link);
enum dc_edid_status dm_helpers_get_sbios_edid(struct dc_link *link, struct dc_edid *edid);

@@ -949,7 +949,6 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
int plane_count;
int i;
unsigned int optimized_min_dst_y_next_start_us;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > 1000.0;
plane_count = 0;
optimized_min_dst_y_next_start_us = 0;
@@ -974,6 +973,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
struct dc_stream_status *stream_status = &context->stream_status[0];
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > 1000.0;
bool is_pwrseq0 = link->link_index == 0;
if (dc_extended_blank_supported(dc)) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -986,18 +987,17 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
}
}
}
/* zstate only supported on PWRSEQ0 and when there's <2 planes*/
if (link->link_index != 0 || stream_status->plane_count > 1)
/* Don't support multi-plane configurations */
if (stream_status->plane_count > 1)
return DCN_ZSTATE_SUPPORT_DISALLOW;
if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000)
if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000))
return DCN_ZSTATE_SUPPORT_ALLOW;
else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
else
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
} else if (allow_z8) {
return DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
} else {
return DCN_ZSTATE_SUPPORT_DISALLOW;
}
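
Condensed, the new decision reads roughly as follows (a control-flow sketch; long_stutter and psr1_enabled are shorthand for the stutter-period/optimized-start and PSR version checks above):

    if (stream_status->plane_count > 1)
        return DCN_ZSTATE_SUPPORT_DISALLOW;
    if (is_pwrseq0 && long_stutter)
        return DCN_ZSTATE_SUPPORT_ALLOW;
    if (is_pwrseq0 && psr1_enabled)
        return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY
                        : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
    return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY
                    : DCN_ZSTATE_SUPPORT_DISALLOW;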

@@ -3183,7 +3183,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
} else {
v->MIN_DST_Y_NEXT_START[k] = v->VTotal[k] - v->VFrontPorch[k] + v->VTotal[k] - v->VActive[k] - v->VStartup[k];
}
v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / (double)v->HTotal[k] / v->PixelClock[k], 1.0) / 4.0;
v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / ((double)v->HTotal[k] / v->PixelClock[k]), 1.0) / 4.0;
if (((v->VUpdateOffsetPix[k] + v->VUpdateWidthPix[k] + v->VReadyOffsetPix[k]) / v->HTotal[k])
<= (isInterlaceTiming ?
dml_floor((v->VTotal[k] - v->VActive[k] - v->VFrontPorch[k] - v->VStartup[k]) / 2.0, 1.0) :
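
The change above is an operator-precedence fix: TSetup is a time and must be divided by the line time HTotal / PixelClock. The old expression associated left-to-right as (4.0 * TSetup / HTotal) / PixelClock, i.e. it multiplied by 1/PixelClock instead of dividing by the line time; the corrected form

    4.0 * v->TSetup[k] / ((double)v->HTotal[k] / v->PixelClock[k])

expresses the setup time in lines before the quarter-line rounding done by the floor and divide-by-4.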

@@ -1400,7 +1400,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
/* SS PSR On: all active surfaces part of streams not supporting PSR stored in MALL */
context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
}
} else if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
} else {
/* SUBVP: phantom surfaces only stored in MALL */
context->bw_ctx.bw.dcn.mall_subvp_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
}
@@ -2126,6 +2126,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
*/
context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
/* Calculate FCLK p-state change watermark based on FCLK pstate change latency in case
* UCLK p-state is not supported, to avoid underflow in case FCLK pstate is supported
*/
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
} else {
/* Set A:
* All clocks min.

@@ -46,7 +46,10 @@ struct dsc_parameters {
uint32_t rc_buffer_model_size;
};
int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_parameters *dsc_params);
struct rc_params;
int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps,
const struct rc_params *rc,
struct dsc_parameters *dsc_params);
#endif

@@ -95,19 +95,19 @@ static void copy_rc_to_cfg(struct drm_dsc_config *dsc_cfg, const struct rc_param
dsc_cfg->rc_buf_thresh[i] = rc->rc_buf_thresh[i];
}
int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_parameters *dsc_params)
int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps,
const struct rc_params *rc,
struct dsc_parameters *dsc_params)
{
int ret;
struct rc_params rc;
struct drm_dsc_config dsc_cfg;
unsigned long long tmp;
calc_rc_params(&rc, pps);
dsc_params->pps = *pps;
dsc_params->pps.initial_scale_value = 8 * rc.rc_model_size / (rc.rc_model_size - rc.initial_fullness_offset);
dsc_params->pps.initial_scale_value = 8 * rc->rc_model_size / (rc->rc_model_size - rc->initial_fullness_offset);
copy_pps_fields(&dsc_cfg, &dsc_params->pps);
copy_rc_to_cfg(&dsc_cfg, &rc);
copy_rc_to_cfg(&dsc_cfg, rc);
dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 48 : 64;

@@ -44,30 +44,6 @@ enum bw_type {
*/
bool set_dptx_usb4_bw_alloc_support(struct dc_link *link);
/*
* Send a request from DP-Tx requesting to allocate BW remotely after
* allocating it locally. This will get processed by CM and a CB function
* will be called.
*
* @link: pointer to the dc_link struct instance
* @req_bw: The requested bw in Kbyte to allocated
*
* return: none
*/
void set_usb4_req_bw_req(struct dc_link *link, int req_bw);
/*
* CB function for when the status of the Req above is complete. We will
* find out the result of allocating on CM and update structs accordingly
*
* @link: pointer to the dc_link struct instance
* @bw: Allocated or Estimated BW depending on the result
* @result: Response type
*
* return: none
*/
void get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t result);
/*
* Return the response_ready flag from dc_link struct
*

@@ -532,6 +532,9 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
if (dmub->hw_funcs.reset)
dmub->hw_funcs.reset(dmub);
/* reset the cache of the last wptr as well now that hw is reset */
dmub->inbox1_last_wptr = 0;
cw0.offset.quad_part = inst_fb->gpu_addr;
cw0.region.base = DMUB_CW0_BASE;
cw0.region.top = cw0.region.base + inst_fb->size - 1;
@@ -649,6 +652,15 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
if (dmub->hw_funcs.reset)
dmub->hw_funcs.reset(dmub);
/* mailboxes have been reset in hw, so reset the sw state as well */
dmub->inbox1_last_wptr = 0;
dmub->inbox1_rb.wrpt = 0;
dmub->inbox1_rb.rptr = 0;
dmub->outbox0_rb.wrpt = 0;
dmub->outbox0_rb.rptr = 0;
dmub->outbox1_rb.wrpt = 0;
dmub->outbox1_rb.rptr = 0;
dmub->hw_init = false;
return DMUB_STATUS_OK;
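
The zeroing above restores the invariant that the cached software ring state matches the freshly reset hardware; as an illustrative check (not code from the patch):

    /* after hw_funcs.reset() the HW pointers are 0 again, so any stale
     * cached wptr/rptr would place the next command at the wrong offset */
    WARN_ON(dmub->inbox1_rb.wrpt || dmub->inbox1_rb.rptr ||
            dmub->inbox1_last_wptr);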

@@ -35,6 +35,7 @@
#define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C
#define DP_BRANCH_DEVICE_ID_006037 0x006037
#define DP_BRANCH_DEVICE_ID_001CF8 0x001CF8
#define DP_BRANCH_DEVICE_ID_0060AD 0x0060AD
#define DP_BRANCH_HW_REV_10 0x10
#define DP_BRANCH_HW_REV_20 0x20

@@ -44,8 +44,8 @@ void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
enum adaptive_sync_type {
ADAPTIVE_SYNC_TYPE_NONE = 0,
ADAPTIVE_SYNC_TYPE_DP = 1,
ADAPTIVE_SYNC_TYPE_PCON_IN_WHITELIST = 2,
ADAPTIVE_SYNC_TYPE_PCON_NOT_IN_WHITELIST = 3,
FREESYNC_TYPE_PCON_IN_WHITELIST = 2,
FREESYNC_TYPE_PCON_NOT_IN_WHITELIST = 3,
ADAPTIVE_SYNC_TYPE_EDP = 4,
};

@@ -533,11 +533,11 @@ void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream,
if (stream != NULL)
mod_build_adaptive_sync_infopacket_v2(stream, param, info_packet);
break;
case ADAPTIVE_SYNC_TYPE_PCON_IN_WHITELIST:
case FREESYNC_TYPE_PCON_IN_WHITELIST:
mod_build_adaptive_sync_infopacket_v1(info_packet);
break;
case ADAPTIVE_SYNC_TYPE_NONE:
case ADAPTIVE_SYNC_TYPE_PCON_NOT_IN_WHITELIST:
case FREESYNC_TYPE_PCON_NOT_IN_WHITELIST:
default:
break;
}

@@ -2007,14 +2007,16 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
gc_ver == IP_VERSION(10, 3, 0) ||
gc_ver == IP_VERSION(10, 1, 2) ||
gc_ver == IP_VERSION(11, 0, 0) ||
gc_ver == IP_VERSION(11, 0, 2)))
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
if (!(gc_ver == IP_VERSION(10, 3, 1) ||
gc_ver == IP_VERSION(10, 3, 0) ||
gc_ver == IP_VERSION(10, 1, 2) ||
gc_ver == IP_VERSION(11, 0, 0) ||
gc_ver == IP_VERSION(11, 0, 2)))
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)

@@ -1497,6 +1497,20 @@ static int smu_disable_dpms(struct smu_context *smu)
}
}
/*
* For SMU 13.0.4/11, the PMFW will handle feature disablement properly
* for the gpu reset case. Driver involvement is unnecessary.
*/
if (amdgpu_in_reset(adev)) {
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 4):
case IP_VERSION(13, 0, 11):
return 0;
default:
break;
}
}
/*
* For gpu reset, runpm and hibernation through BACO,
* BACO feature has to be kept enabled.