Merge tag 'drm-next-2020-12-24' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Xmas eve pull request present.

  Just some fixes that trickled in this past week: Mostly amdgpu fixes,
  with a dma-buf/mips build fix and some misc komeda fixes.

  dma-buf:
   - fix build on mips

  komeda:
   - fix commit tail operation order
   - NULL pointer fix
   - out of bounds access fix

  ttm:
   - remove an unused function

  amdgpu:
   - Vangogh SMU fixes
   - Arcturus gfx9 fixes
   - Misc display fixes
   - Sienna Cichlid SMU update
   - Fix S3 display memory leak
   - Fix regression caused by DP sub-connector support

  amdkfd:
   - Properly require pcie atomics for gfx10"

* tag 'drm-next-2020-12-24' of git://anongit.freedesktop.org/drm/drm: (31 commits)
  drm/amd/display: Fix memory leaks in S3 resume
  drm/amdgpu: Fix a copy-pasta comment
  drm/amdgpu: only set DP subconnector type on DP and eDP connectors
  drm/amd/pm: bump Sienna Cichlid smu_driver_if version to match latest pmfw
  drm/amd/display: add getter routine to retrieve mpcc mux
  drm/amd/display: always program DPPDTO unless not safe to lower
  drm/amd/display: [FW Promotion] Release 0.0.47
  drm/amd/display: updated wm table for Renoir
  drm/amd/display: Acquire DSC during split stream for ODM only if top_pipe
  drm/amd/display: Multi-display underflow observed
  drm/amd/display: Remove unnecessary NULL check
  drm/amd/display: Update RN/VGH active display count workaround
  drm/amd/display: change SMU response timeout to 2s.
  drm/amd/display: gradually ramp ABM intensity
  drm/amd/display: To modify the condition in indicating branch device
  drm/amd/display: Modify the hdcp device count check condition
  drm/amd/display: Interfaces for hubp blank and soft reset
  drm/amd/display: handler not correctly checked at remove_irq_handler
  drm/amdgpu: check gfx pipe availability before toggling its interrupts
  drm/amdgpu: remove unnecessary asic type check
  ...
Linus Torvalds, 2020-12-24 12:14:29 -08:00
commit ef2c8b81b8
38 changed files with 182 additions and 132 deletions

@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
struct cma_heap {


@ -1414,10 +1414,12 @@ out:
pm_runtime_put_autosuspend(connector->dev->dev);
}
drm_dp_set_subconnector_property(&amdgpu_connector->base,
ret,
amdgpu_dig_connector->dpcd,
amdgpu_dig_connector->downstream_ports);
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP)
drm_dp_set_subconnector_property(&amdgpu_connector->base,
ret,
amdgpu_dig_connector->dpcd,
amdgpu_dig_connector->downstream_ports);
return ret;
}


@ -5069,8 +5069,7 @@ out:
* @pdev: pointer to PCI device
*
* Called when the error recovery driver tells us that its
* OK to resume normal operation. Use completion to allow
* halted scsi ops to resume.
* OK to resume normal operation.
*/
void amdgpu_pci_resume(struct pci_dev *pdev)
{


@ -496,7 +496,8 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
break;
}
if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
if (amdgpu_sriov_vf(adev) ||
!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
size = 0;
} else {
size = amdgpu_gmc_get_vbios_fb_size(adev);


@ -1647,7 +1647,7 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
}
/* No CPG in Arcturus */
if (adev->asic_type != CHIP_ARCTURUS) {
if (adev->gfx.num_gfx_rings) {
r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
if (r)
return r;
@ -2633,7 +2633,14 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
bool enable)
{
u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
u32 tmp;
/* don't toggle interrupts that are only applicable
* to me0 pipe0 on AISCs that have me0 removed */
if (!adev->gfx.num_gfx_rings)
return;
tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
@ -3822,7 +3829,7 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
gfx_v9_0_enable_gui_idle_interrupt(adev, false);
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
if (adev->asic_type != CHIP_ARCTURUS) {
if (adev->gfx.num_gfx_rings) {
/* legacy firmware loading */
r = gfx_v9_0_cp_gfx_load_microcode(adev);
if (r)
@ -3838,7 +3845,7 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
if (r)
return r;
if (adev->asic_type != CHIP_ARCTURUS) {
if (adev->gfx.num_gfx_rings) {
r = gfx_v9_0_cp_gfx_resume(adev);
if (r)
return r;
@ -3848,7 +3855,7 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
if (r)
return r;
if (adev->asic_type != CHIP_ARCTURUS) {
if (adev->gfx.num_gfx_rings) {
ring = &adev->gfx.gfx_ring[0];
r = amdgpu_ring_test_helper(ring);
if (r)
@ -3884,7 +3891,7 @@ static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
if (adev->asic_type != CHIP_ARCTURUS)
if (adev->gfx.num_gfx_rings)
gfx_v9_0_cp_gfx_enable(adev, enable);
gfx_v9_0_cp_compute_enable(adev, enable);
}
@ -4025,7 +4032,7 @@ static int gfx_v9_0_soft_reset(void *handle)
/* stop the rlc */
adev->gfx.rlc.funcs->stop(adev);
if (adev->asic_type != CHIP_ARCTURUS)
if (adev->gfx.num_gfx_rings)
/* Disable GFX parsing/prefetching */
gfx_v9_0_cp_gfx_enable(adev, false);


@ -1577,13 +1577,10 @@ static int gmc_v9_0_hw_init(void *handle)
gmc_v9_0_init_golden_registers(adev);
if (adev->mode_info.num_crtc) {
if (adev->asic_type != CHIP_ARCTURUS) {
/* Lockout access through VGA aperture*/
WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
/* disable VGA render */
WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
}
/* Lockout access through VGA aperture*/
WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
/* disable VGA render */
WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
}
amdgpu_device_program_register_sequence(adev,


@ -422,7 +422,7 @@ static const struct kfd_device_info navi10_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@ -440,7 +440,7 @@ static const struct kfd_device_info navi12_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@ -458,7 +458,7 @@ static const struct kfd_device_info navi14_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@ -476,7 +476,7 @@ static const struct kfd_device_info sienna_cichlid_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = false,
.needs_pci_atomics = true,
.num_sdma_engines = 4,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@ -494,7 +494,7 @@ static const struct kfd_device_info navy_flounder_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@ -530,7 +530,7 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
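
For context, the flag flipped above is what lets KFD refuse to expose a gfx10 part when the platform cannot complete PCIe atomics. A hedged sketch of how such a flag is typically honoured at probe time follows; the structure and variable names are simplified stand-ins, while pci_enable_atomic_ops_to_root() and the PCI_EXP_DEVCAP2_ATOMIC_* masks are the real PCI core API:

        /* sketch only: gate device bring-up on PCIe atomic completer support */
        if (device_info->needs_pci_atomics) {
                int err = pci_enable_atomic_ops_to_root(pdev,
                                PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
                                PCI_EXP_DEVCAP2_ATOMIC_COMP64);
                if (err < 0)
                        return NULL;    /* no atomic completers on the root path: skip the device */
        }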


@ -2386,7 +2386,8 @@ void amdgpu_dm_update_connector_after_detect(
drm_connector_update_edid_property(connector,
aconnector->edid);
drm_add_edid_modes(connector, aconnector->edid);
aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid);
drm_connector_list_update(connector);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
@ -9367,7 +9368,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
if (dm_old_crtc_state->dsc_force_changed && new_crtc_state)
if (dm_old_crtc_state->dsc_force_changed)
new_crtc_state->mode_changed = true;
}


@ -165,7 +165,10 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
list);
if (ih == handler) {
if (handler == NULL)
continue;
if (ih == handler->handler) {
/* Found our handler. Remove it from the list. */
list_del(&handler->list);
handler_removed = true;


@ -75,15 +75,8 @@ int rn_get_active_display_cnt_wa(
for (i = 0; i < dc->link_count; i++) {
const struct dc_link *link = dc->links[i];
/*
* Only notify active stream or virtual stream.
* Need to notify virtual stream to work around
* headless case. HPD does not fire when system is in
* S0i2.
*/
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
if (link->connector_signal == SIGNAL_TYPE_VIRTUAL ||
link->link_enc->funcs->is_dig_enabled(link->link_enc))
if (link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
@ -234,12 +227,11 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
// always update dtos unless clock is lowered and not safe to lower
if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
rn_update_clocks_update_dpp_dto(
clk_mgr,
context,
clk_mgr_base->clks.actual_dppclk_khz,
safe_to_lower);
rn_update_clocks_update_dpp_dto(
clk_mgr,
context,
clk_mgr_base->clks.actual_dppclk_khz,
safe_to_lower);
}
if (update_dispclk &&
@ -738,32 +730,32 @@ static struct wm_table ddr4_wm_table_rn = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9.09,
.sr_enter_plus_exit_time_us = 10.14,
.sr_exit_time_us = 11.90,
.sr_enter_plus_exit_time_us = 12.80,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 11.12,
.sr_enter_plus_exit_time_us = 12.48,
.sr_exit_time_us = 13.18,
.sr_enter_plus_exit_time_us = 14.30,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 11.12,
.sr_enter_plus_exit_time_us = 12.48,
.sr_exit_time_us = 13.18,
.sr_enter_plus_exit_time_us = 14.30,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 11.12,
.sr_enter_plus_exit_time_us = 12.48,
.sr_exit_time_us = 13.18,
.sr_enter_plus_exit_time_us = 14.30,
.valid = true,
},
}


@ -99,7 +99,7 @@ int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned
/* Trigger the message transaction by writing the message ID */
REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);
result = rn_smu_wait_for_response(clk_mgr, 10, 1000);
result = rn_smu_wait_for_response(clk_mgr, 10, 200000);
ASSERT(result == VBIOSSMC_Result_OK || result == VBIOSSMC_Result_UnknownCmd);
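
Assuming the second and third arguments of rn_smu_wait_for_response() are the per-poll delay in microseconds and the retry count (which is how the surrounding code reads), the new values work out to the 2 s mentioned in the commit title:

        10 us * 1000   =  10 ms   (old timeout)
        10 us * 200000 =   2 s    (new timeout)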


@ -74,15 +74,8 @@ int vg_get_active_display_cnt_wa(
for (i = 0; i < dc->link_count; i++) {
const struct dc_link *link = dc->links[i];
/*
* Only notify active stream or virtual stream.
* Need to notify virtual stream to work around
* headless case. HPD does not fire when system is in
* S0i2.
*/
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
if (link->connector_signal == SIGNAL_TYPE_VIRTUAL ||
link->link_enc->funcs->is_dig_enabled(link->link_enc))
if (link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}


@ -2625,26 +2625,6 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
if (update_type != UPDATE_TYPE_FAST) {
// If changing VTG FP2: wait until back in vactive to program FP2
// Need to ensure that pipe unlock happens soon after to minimize race condition
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->top_pipe || pipe_ctx->stream != stream)
continue;
if (!pipe_ctx->update_flags.bits.global_sync)
continue;
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
}
}
if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
dc->hwss.interdependent_update_lock(dc, context, false);
else


@ -3173,13 +3173,7 @@ static void get_active_converter_info(
}
/* DPCD 0x5 bit 0 = 1, it indicate it's branch device */
if (ds_port.fields.PORT_TYPE == DOWNSTREAM_DP) {
link->dpcd_caps.is_branch_dev = false;
}
else {
link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
}
link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
switch (ds_port.fields.PORT_TYPE) {
case DOWNSTREAM_VGA:


@ -1241,6 +1241,22 @@ void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
}
bool hubp1_in_blank(struct hubp *hubp)
{
uint32_t in_blank;
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_GET(DCHUBP_CNTL, HUBP_IN_BLANK, &in_blank);
return in_blank ? true : false;
}
void hubp1_soft_reset(struct hubp *hubp, bool reset)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_UPDATE(DCHUBP_CNTL, HUBP_DISABLE, reset ? 1 : 0);
}
void hubp1_init(struct hubp *hubp)
{
//do nothing
@ -1272,6 +1288,8 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.dmdata_set_attributes = NULL,
.dmdata_load = NULL,
.hubp_soft_reset = hubp1_soft_reset,
.hubp_in_blank = hubp1_in_blank,
};
/*****************************************/


@ -260,6 +260,7 @@
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_IN_BLANK, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_BANKS, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
@ -455,6 +456,7 @@
type HUBP_VTG_SEL;\
type HUBP_UNDERFLOW_STATUS;\
type HUBP_UNDERFLOW_CLEAR;\
type HUBP_IN_BLANK;\
type NUM_PIPES;\
type NUM_BANKS;\
type PIPE_INTERLEAVE;\
@ -772,5 +774,7 @@ void hubp1_vready_workaround(struct hubp *hubp,
void hubp1_init(struct hubp *hubp);
void hubp1_read_state_common(struct hubp *hubp);
bool hubp1_in_blank(struct hubp *hubp);
void hubp1_soft_reset(struct hubp *hubp, bool reset);
#endif


@ -467,6 +467,17 @@ void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 1 : 0);
}
unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
uint32_t val;
if (opp_id < MAX_OPP && REG(MUX[opp_id]))
REG_GET(MUX[opp_id], MPC_OUT_MUX, &val);
return val;
}
static const struct mpc_funcs dcn10_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
@ -483,6 +494,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
.set_denorm_clamp = NULL,
.set_output_csc = NULL,
.set_output_gamma = NULL,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
};
void dcn10_mpc_construct(struct dcn10_mpc *mpc10,


@ -200,4 +200,5 @@ void mpc1_read_mpcc_state(
void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock);
unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id);
#endif


@ -1595,6 +1595,8 @@ static struct hubp_funcs dcn20_hubp_funcs = {
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp1_init,
.validate_dml_output = hubp2_validate_dml_output,
.hubp_in_blank = hubp1_in_blank,
.hubp_soft_reset = hubp1_soft_reset,
};


@ -1586,7 +1586,10 @@ static void dcn20_program_pipe(
&& !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
if (pipe_ctx->update_flags.bits.global_sync) {
/* Only update TG on top pipe */
if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
&& !pipe_ctx->prev_odm_pipe) {
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg,
pipe_ctx->pipe_dlg_param.vready_offset,
@ -1594,8 +1597,11 @@ static void dcn20_program_pipe(
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width);
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
if (hws->funcs.setup_vupdate_interrupt)
hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
@ -2570,4 +2576,4 @@ void dcn20_set_disp_pattern_generator(const struct dc *dc,
{
pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern,
color_space, color_depth, solid_color, width, height, offset);
}
}


@ -556,6 +556,7 @@ const struct mpc_funcs dcn20_mpc_funcs = {
.set_ocsc_default = mpc2_set_ocsc_default,
.set_output_gamma = mpc2_set_output_gamma,
.power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
};
void dcn20_mpc_construct(struct dcn20_mpc *mpc20,


@ -1933,7 +1933,7 @@ bool dcn20_split_stream_for_odm(
next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
else
next_odm_pipe->stream_res.opp = next_odm_pipe->top_pipe->stream_res.opp;
if (next_odm_pipe->stream->timing.flags.DSC == 1) {
if (next_odm_pipe->stream->timing.flags.DSC == 1 && !next_odm_pipe->top_pipe) {
dcn20_acquire_dsc(dc, res_ctx, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
ASSERT(next_odm_pipe->stream_res.dsc);
if (next_odm_pipe->stream_res.dsc == NULL)


@ -509,6 +509,8 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_clear_underflow = hubp2_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp3_init,
.hubp_in_blank = hubp1_in_blank,
.hubp_soft_reset = hubp1_soft_reset,
};
bool hubp3_construct(


@ -1428,6 +1428,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
.program_3dlut = mpc3_program_3dlut,
.release_rmu = mpcc3_release_rmu,
.power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
};


@ -188,6 +188,8 @@ struct hubp_funcs {
void (*set_unbounded_requesting)(
struct hubp *hubp,
bool enable);
bool (*hubp_in_blank)(struct hubp *hubp);
void (*hubp_soft_reset)(struct hubp *hubp, bool reset);
};


@ -359,6 +359,10 @@ struct mpc_funcs {
int (*release_rmu)(struct mpc *mpc, int mpcc_id);
unsigned int (*get_mpc_out_mux)(
struct mpc *mpc,
int opp_id);
};
#endif


@ -47,10 +47,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
#define DMUB_FW_VERSION_GIT_HASH 0xa18e25995
#define DMUB_FW_VERSION_GIT_HASH 0xf51b86a
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
#define DMUB_FW_VERSION_REVISION 46
#define DMUB_FW_VERSION_REVISION 47
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0


@ -128,8 +128,12 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
/* Some MST display may choose to report the internal panel as an HDCP RX.
* To update this condition with 1(because the immediate repeater's internal
* panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp).
* Device count must be greater than or equal to tracked hdcp displays.
*/
return ((1 + get_device_count(hdcp)) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}


@ -207,8 +207,11 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
/* Some MST display may choose to report the internal panel as an HDCP RX. */
/* To update this condition with 1(because the immediate repeater's internal */
/* panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp). */
/* Device count must be greater than or equal to tracked hdcp displays. */
return ((1 + get_device_count(hdcp)) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
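
A worked example of the relaxed check used in both the HDCP 1.x and 2.x paths above, with illustrative numbers only: suppose an MST repeater reports DEVICE_COUNT = 2 because it leaves its own internal panel out of the count, while the driver is tracking 3 HDCP displays (two downstream sinks plus that internal panel).

        old check: 2 < 3         -> *_DEVICE_COUNT_MISMATCH_FAILURE
        new check: (1 + 2) < 3   -> false -> MOD_HDCP_STATUS_SUCCESS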


@ -82,22 +82,24 @@ struct abm_parameters {
unsigned char deviation_gain;
unsigned char min_knee;
unsigned char max_knee;
unsigned short blRampReduction;
unsigned short blRampStart;
};
static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = {
// min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee
{0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0},
{0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf},
{0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0},
{0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
// min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blStart blRed
{0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0, 0xCCCC, 0xCCCC},
{0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf, 0xCCCC, 0xCCCC},
{0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0, 0xCCCC, 0xCCCC},
{0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
};
static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = {
// min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee
{0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
{0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
{0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
{0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
// min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blStart blRed
{0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
{0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
{0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
{0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
};
static const struct abm_parameters * const abm_settings[] = {
@ -662,6 +664,7 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
{
struct iram_table_v_2_2 ram_table;
struct abm_config_table config;
unsigned int set = params.set;
bool result = false;
uint32_t i, j = 0;
@ -710,6 +713,18 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
config.max_knee[i] = ram_table.max_knee[i];
}
if (params.backlight_ramping_override) {
for (i = 0; i < NUM_AGGR_LEVEL; i++) {
config.blRampReduction[i] = params.backlight_ramping_reduction;
config.blRampStart[i] = params.backlight_ramping_start;
}
} else {
for (i = 0; i < NUM_AGGR_LEVEL; i++) {
config.blRampReduction[i] = abm_settings[set][i].blRampReduction;
config.blRampStart[i] = abm_settings[set][i].blRampStart;
}
}
config.min_abm_backlight = ram_table.min_abm_backlight;
#if defined(CONFIG_DRM_AMD_DC_DCN)


@ -39,6 +39,7 @@ enum abm_defines {
struct dmcu_iram_parameters {
unsigned int *backlight_lut_array;
unsigned int backlight_lut_array_size;
bool backlight_ramping_override;
unsigned int backlight_ramping_reduction;
unsigned int backlight_ramping_start;
unsigned int min_abm_backlight;


@ -30,7 +30,7 @@
#define SMU11_DRIVER_IF_VERSION_NV10 0x36
#define SMU11_DRIVER_IF_VERSION_NV12 0x36
#define SMU11_DRIVER_IF_VERSION_NV14 0x36
#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3B
#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3D
#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xC
#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x02
#define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF


@ -724,8 +724,13 @@ static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
static int vangogh_system_features_control(struct smu_context *smu, bool en)
{
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
en ? RLC_STATUS_NORMAL : RLC_STATUS_OFF, NULL);
struct amdgpu_device *adev = smu->adev;
if (adev->pm.fw_version >= 0x43f1700)
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
en ? RLC_STATUS_NORMAL : RLC_STATUS_OFF, NULL);
else
return 0;
}
static const struct pptable_funcs vangogh_ppt_funcs = {


@ -152,7 +152,6 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
ret = of_reserved_mem_device_init(dev);
if (ret && ret != -ENODEV)
return ret;
ret = 0;
for_each_available_child_of_node(np, child) {
if (of_node_name_eq(child, "pipeline")) {


@ -81,10 +81,10 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_modeset_enables(dev, old_state);
drm_atomic_helper_wait_for_flip_done(dev, old_state);
drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_flip_done(dev, old_state);
drm_atomic_helper_cleanup_planes(dev, old_state);
}


@ -137,9 +137,10 @@ komeda_pipeline_get_first_component(struct komeda_pipeline *pipe,
u32 comp_mask)
{
struct komeda_component *c = NULL;
unsigned long comp_mask_local = (unsigned long)comp_mask;
int id;
id = find_first_bit((unsigned long *)&comp_mask, 32);
id = find_first_bit(&comp_mask_local, 32);
if (id < 32)
c = komeda_pipeline_get_component(pipe, id);
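
This hunk is the out-of-bounds access fix called out in the summary: find_first_bit() walks whole unsigned longs, so on a 64-bit kernel pointing it at a 32-bit variable reads four bytes past the end of that variable. A minimal sketch of the hazard and the fix (get_mask() is a hypothetical stand-in):

        u32 mask = get_mask();
        int id;

        /* broken: find_first_bit() dereferences a full unsigned long here */
        id = find_first_bit((unsigned long *)&mask, 32);

        /* fixed: widen into a correctly sized local first */
        unsigned long mask_local = mask;
        id = find_first_bit(&mask_local, 32);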


@ -704,10 +704,10 @@ komeda_compiz_set_input(struct komeda_compiz *compiz,
cin->layer_alpha = dflow->layer_alpha;
old_st = komeda_component_get_old_state(&compiz->base, drm_st);
WARN_ON(!old_st);
/* compare with old to check if this input has been changed */
if (memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
if (WARN_ON(!old_st) ||
memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
c_st->changed_active_inputs |= BIT(idx);
komeda_component_add_input(c_st, &dflow->input, idx);


@ -239,21 +239,6 @@ static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
return p;
}
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
unsigned int count = 0;
struct page *p;
spin_lock(&pt->lock);
/* Only used for debugfs, the overhead doesn't matter */
list_for_each_entry(p, &pt->pages, lru)
++count;
spin_unlock(&pt->lock);
return count;
}
/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
enum ttm_caching caching, unsigned int order)
@ -543,6 +528,20 @@ void ttm_pool_fini(struct ttm_pool *pool)
EXPORT_SYMBOL(ttm_pool_fini);
#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
unsigned int count = 0;
struct page *p;
spin_lock(&pt->lock);
/* Only used for debugfs, the overhead doesn't matter */
list_for_each_entry(p, &pt->pages, lru)
++count;
spin_unlock(&pt->lock);
return count;
}
/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,