Merge tag 'drm-fixes-2023-06-09' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Bit busier and a bit more scattered than usual. amdgpu is the main
  one, with ivpu and msm having a few fixes, then i915, exynos, ast,
  lima, radeon with some misc bits, but overall nothing standing out.

  fb-helper:
   - Fill in fb-helper vars more correctly

  amdgpu:
   - S0ix fixes
   - GPU reset fixes
   - SMU13 fixes
   - SMU11 fixes
   - Misc Display fixes
   - Revert RV/RV2/PCO clock counter changes
   - Fix Stoney xclk value
   - Fix reserved vram debug info

  radeon:
   - Fix a potential use after free

  i915:
   - CDCLK voltage fix for ADL-P
   - eDP wake sync pulse fix
   - Two error handling fixes to selftests

  exynos:
   - Fix wrong return in Exynos vidi driver
   - Fix use-after-free issue to Exynos g2d driver

  ast:
   - resume and modeset fixes for ast

  ivpu:
   - Assorted ivpu fixes

  lima:
   - lima context destroy fix

  msm:
   - Fix max segment size to address splat on newer a6xx
   - Disable PSR by default w/ modparam to re-enable, since there still
     seems to be a lingering issue
   - Fix HPD issue
   - Fix issue with uninitialized GMU mutex"

* tag 'drm-fixes-2023-06-09' of git://anongit.freedesktop.org/drm/drm: (32 commits)
  drm/msm/a6xx: initialize GMU mutex earlier
  drm/msm/dp: enable HDP plugin/unplugged interrupts at hpd_enable/disable
  accel/ivpu: Fix sporadic VPU boot failure
  accel/ivpu: Do not use mutex_lock_interruptible
  accel/ivpu: Do not trigger extra VPU reset if the VPU is idle
  drm/amd/display: Reduce sdp bw after urgent to 90%
  drm/amdgpu: change reserved vram info print
  drm/amdgpu: fix xclk freq on CHIP_STONEY
  drm/radeon: fix race condition UAF in radeon_gem_set_domain_ioctl
  Revert "drm/amdgpu: switch to golden tsc registers for raven/raven2"
  Revert "drm/amdgpu: Differentiate between Raven2 and Raven/Picasso according to revision id"
  Revert "drm/amdgpu: change the reference clock for raven/raven2"
  drm/amd/display: add ODM case when looking for first split pipe
  drm/amd: Make lack of `ACPI_FADT_LOW_POWER_S0` or `CONFIG_AMD_PMC` louder during suspend path
  drm/amd/pm: conditionally disable pcie lane switching for some sienna_cichlid SKUs
  drm/amd/pm: Fix power context allocation in SMU13
  drm/amdgpu: fix Null pointer dereference error in amdgpu_device_recover_vram
  drm/amd: Disallow s0ix without BIOS support again
  drm/i915/selftests: Add some missing error propagation
  drm/exynos: fix race condition UAF in exynos_g2d_exec_ioctl
  ...
commit 33f2b5785a
Linus Torvalds, 2023-06-08 19:14:10 -07:00
38 changed files with 343 additions and 239 deletions

--- a/drivers/accel/ivpu/Kconfig
+++ b/drivers/accel/ivpu/Kconfig
@@ -7,6 +7,7 @@ config DRM_ACCEL_IVPU
     depends on PCI && PCI_MSI
     select FW_LOADER
     select SHMEM
+    select GENERIC_ALLOCATOR
     help
       Choose this option if you have a system that has an 14th generation Intel CPU
       or newer. VPU stands for Versatile Processing Unit and it's a CPU-integrated

--- a/drivers/accel/ivpu/ivpu_hw_mtl.c
+++ b/drivers/accel/ivpu/ivpu_hw_mtl.c
@@ -197,6 +197,11 @@ static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
     hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
 }
 
+static int ivpu_hw_mtl_wait_for_vpuip_bar(struct ivpu_device *vdev)
+{
+    return REGV_POLL_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, AON, 0, 100);
+}
+
 static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
 {
     struct ivpu_hw_info *hw = vdev->hw;
@@ -239,6 +244,12 @@ static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
             ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
             return ret;
         }
+
+        ret = ivpu_hw_mtl_wait_for_vpuip_bar(vdev);
+        if (ret) {
+            ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
+            return ret;
+        }
     }
 
     return 0;
@@ -256,7 +267,7 @@ static int ivpu_pll_disable(struct ivpu_device *vdev)
 
 static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
 {
-    u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_CLR);
+    u32 val = 0;
 
     val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
     val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
@@ -754,9 +765,8 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
 {
     int ret = 0;
 
-    if (ivpu_hw_mtl_reset(vdev)) {
+    if (!ivpu_hw_mtl_is_idle(vdev) && ivpu_hw_mtl_reset(vdev)) {
         ivpu_err(vdev, "Failed to reset the VPU\n");
         ret = -EIO;
     }
 
     if (ivpu_pll_disable(vdev)) {
@@ -764,8 +774,10 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
         ret = -EIO;
     }
 
-    if (ivpu_hw_mtl_d0i3_enable(vdev))
-        ivpu_warn(vdev, "Failed to enable D0I3\n");
+    if (ivpu_hw_mtl_d0i3_enable(vdev)) {
+        ivpu_err(vdev, "Failed to enter D0I3\n");
+        ret = -EIO;
+    }
 
     return ret;
 }

--- a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
@@ -91,6 +91,7 @@
 #define MTL_VPU_HOST_SS_CPR_RST_SET_MSS_MAS_MASK    BIT_MASK(11)
 
 #define MTL_VPU_HOST_SS_CPR_RST_CLR                 0x00000098u
+#define MTL_VPU_HOST_SS_CPR_RST_CLR_AON_MASK        BIT_MASK(0)
 #define MTL_VPU_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK    BIT_MASK(1)
 #define MTL_VPU_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK    BIT_MASK(10)
 #define MTL_VPU_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK    BIT_MASK(11)

--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -183,9 +183,7 @@ ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct v
     struct ivpu_ipc_info *ipc = vdev->ipc;
     int ret;
 
-    ret = mutex_lock_interruptible(&ipc->lock);
-    if (ret)
-        return ret;
+    mutex_lock(&ipc->lock);
 
     if (!ipc->on) {
         ret = -EAGAIN;
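
A note on the pattern in this and the ivpu_mmu.c hunks below: mutex_lock_interruptible() can fail with -EINTR whenever the calling task has a signal pending, so every caller inherits an error path even though the critical section itself cannot fail; for short sections the driver switches to plain mutex_lock(). A minimal userspace analogue of the difference (plain C with pthreads; the "interruptible" lock is a stand-in, not the kernel API):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool signal_pending_flag = true;   /* pretend a signal arrived */

    /* Models mutex_lock_interruptible(): may fail before even trying. */
    static int lock_interruptible(pthread_mutex_t *m)
    {
        if (signal_pending_flag)
            return -EINTR;
        return pthread_mutex_lock(m);
    }

    int main(void)
    {
        if (lock_interruptible(&lock))
            puts("interruptible lock: spurious -EINTR, caller must cope");

        pthread_mutex_lock(&lock);        /* cannot fail; no error path */
        puts("plain lock: taken");
        pthread_mutex_unlock(&lock);
        return 0;
    }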

--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -431,6 +431,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
     struct ivpu_file_priv *file_priv = file->driver_priv;
     struct ivpu_device *vdev = file_priv->vdev;
     struct ww_acquire_ctx acquire_ctx;
+    enum dma_resv_usage usage;
     struct ivpu_bo *bo;
     int ret;
     u32 i;
@@ -461,22 +462,28 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
 
     job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;
 
-    ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx);
+    ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
+                                    &acquire_ctx);
     if (ret) {
         ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
         return ret;
     }
 
-    ret = dma_resv_reserve_fences(bo->base.resv, 1);
-    if (ret) {
-        ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
-        goto unlock_reservations;
+    for (i = 0; i < buf_count; i++) {
+        ret = dma_resv_reserve_fences(job->bos[i]->base.resv, 1);
+        if (ret) {
+            ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
+            goto unlock_reservations;
+        }
     }
 
-    dma_resv_add_fence(bo->base.resv, job->done_fence, DMA_RESV_USAGE_WRITE);
+    for (i = 0; i < buf_count; i++) {
+        usage = (i == CMD_BUF_IDX) ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_BOOKKEEP;
+        dma_resv_add_fence(job->bos[i]->base.resv, job->done_fence, usage);
+    }
 
 unlock_reservations:
-    drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx);
+    drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx);
 
     wmb(); /* Flush write combining buffers */

--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -587,16 +587,11 @@ static int ivpu_mmu_strtab_init(struct ivpu_device *vdev)
 int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid)
 {
     struct ivpu_mmu_info *mmu = vdev->mmu;
-    int ret;
+    int ret = 0;
 
-    ret = mutex_lock_interruptible(&mmu->lock);
-    if (ret)
-        return ret;
-
-    if (!mmu->on) {
-        ret = 0;
+    mutex_lock(&mmu->lock);
+    if (!mmu->on)
         goto unlock;
-    }
 
     ret = ivpu_mmu_cmdq_write_tlbi_nh_asid(vdev, ssid);
     if (ret)
@@ -614,7 +609,7 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
     struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
     u64 *entry;
     u64 cd[4];
-    int ret;
+    int ret = 0;
 
     if (ssid > IVPU_MMU_CDTAB_ENT_COUNT)
         return -EINVAL;
@@ -655,14 +650,9 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
     ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
          cd_dma ? "write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);
 
-    ret = mutex_lock_interruptible(&mmu->lock);
-    if (ret)
-        return ret;
-
-    if (!mmu->on) {
-        ret = 0;
+    mutex_lock(&mmu->lock);
+    if (!mmu->on)
         goto unlock;
-    }
 
     ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
     if (ret)

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1092,16 +1092,20 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
      * S0ix even though the system is suspending to idle, so return false
      * in that case.
      */
-    if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
-        dev_warn_once(adev->dev,
+    if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+        dev_err_once(adev->dev,
                   "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
                   "To use suspend-to-idle change the sleep mode in BIOS setup.\n");
+        return false;
+    }
 
 #if !IS_ENABLED(CONFIG_AMD_PMC)
-    dev_warn_once(adev->dev,
+    dev_err_once(adev->dev,
               "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
-#endif /* CONFIG_AMD_PMC */
-
-    return true;
+    return false;
+#else
+    return true;
+#endif /* CONFIG_AMD_PMC */
 }
 #endif /* CONFIG_SUSPEND */
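
The net effect of this hunk is that amdgpu_acpi_is_s0ix_active() now refuses s0ix unless both conditions hold, where the old code only warned and pressed on. A hedged truth-table sketch of the new decision (standalone C; function name and flags are illustrative, not the driver's API):

    #include <stdbool.h>
    #include <stdio.h>

    /* s0ix requires BIOS support (FADT low-power S0) AND CONFIG_AMD_PMC. */
    static bool s0ix_active(bool fadt_low_power_s0, bool amd_pmc)
    {
        if (!fadt_low_power_s0)
            return false;   /* BIOS not set up for suspend-to-idle */
        return amd_pmc;     /* without the PMC driver, also refuse */
    }

    int main(void)
    {
        printf("no BIOS, no PMC -> %d\n", s0ix_active(false, false));
        printf("no BIOS, PMC    -> %d\n", s0ix_active(false, true));
        printf("BIOS, no PMC    -> %d\n", s0ix_active(true, false));
        printf("BIOS and PMC    -> %d\n", s0ix_active(true, true));
        return 0;
    }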

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -79,9 +79,10 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
 static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
 {
     struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
-    struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+    struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo;
     struct amdgpu_bo_vm *vmbo;
 
+    bo = shadow_bo->parent;
     vmbo = to_amdgpu_bo_vm(bo);
     /* in case amdgpu_device_recover_vram got NULL of bo->parent */
     if (!list_empty(&vmbo->shadow_list)) {
@@ -694,11 +695,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
         return r;
 
     *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
     INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
-    /* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
-     * is initialized.
-     */
-    bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
-
     return r;
 }
@@ -715,6 +711,8 @@ void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
 
     mutex_lock(&adev->shadow_list_lock);
     list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
+    vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
+    vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
     mutex_unlock(&adev->shadow_list_lock);
 }
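
The destroy callback is now installed in amdgpu_bo_add_to_shadow_list(), after shadow->parent is set, so an early error path can no longer run amdgpu_bo_vm_destroy() on a half-built object. A hedged standalone sketch of the ordering rule (stand-in types, not the TTM API):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        struct obj *parent;
        int refcount;
        void (*destroy)(struct obj *);
    };

    /* Safe default: frees the object without touching anything else. */
    static void plain_destroy(struct obj *o) { free(o); }

    /* Specialized teardown that chases o->parent, like
     * amdgpu_bo_vm_destroy(); running it before parent is set would
     * dereference NULL. */
    static void vm_destroy(struct obj *o)
    {
        o->parent->refcount--;
        free(o);
    }

    int main(void)
    {
        struct obj *parent = calloc(1, sizeof(*parent));
        struct obj *shadow = calloc(1, sizeof(*shadow));

        parent->refcount = 1;
        shadow->destroy = plain_destroy;  /* while construction may fail */

        shadow->parent = parent;          /* fully link the object... */
        shadow->destroy = vm_destroy;     /* ...then swap in the callback */

        shadow->destroy(shadow);
        printf("parent refcount: %d\n", parent->refcount);
        free(parent);
        return 0;
    }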

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -564,7 +564,6 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
         return r;
     }
 
-    (*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
     amdgpu_bo_add_to_shadow_list(*vmbo);
 
     return 0;

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -800,7 +800,7 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
 {
     struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
     struct drm_buddy *mm = &mgr->mm;
-    struct drm_buddy_block *block;
+    struct amdgpu_vram_reservation *rsv;
 
     drm_printf(printer, "  vis usage:%llu\n",
            amdgpu_vram_mgr_vis_usage(mgr));
@@ -812,8 +812,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
     drm_buddy_print(mm, printer);
 
     drm_printf(printer, "reserved:\n");
-    list_for_each_entry(block, &mgr->reserved_pages, link)
-        drm_buddy_block_print(mm, block, printer);
+    list_for_each_entry(rsv, &mgr->reserved_pages, blocks)
+        drm_printf(printer, "%#018llx-%#018llx: %llu\n",
+               rsv->start, rsv->start + rsv->size, rsv->size);
     mutex_unlock(&mgr->lock);
 }

--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -149,16 +149,6 @@ MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin");
 #define mmGOLDEN_TSC_COUNT_LOWER_Renoir            0x0026
 #define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX   1
 
-#define mmGOLDEN_TSC_COUNT_UPPER_Raven             0x007a
-#define mmGOLDEN_TSC_COUNT_UPPER_Raven_BASE_IDX    0
-#define mmGOLDEN_TSC_COUNT_LOWER_Raven             0x007b
-#define mmGOLDEN_TSC_COUNT_LOWER_Raven_BASE_IDX    0
-
-#define mmGOLDEN_TSC_COUNT_UPPER_Raven2            0x0068
-#define mmGOLDEN_TSC_COUNT_UPPER_Raven2_BASE_IDX   0
-#define mmGOLDEN_TSC_COUNT_LOWER_Raven2            0x0069
-#define mmGOLDEN_TSC_COUNT_LOWER_Raven2_BASE_IDX   0
-
 enum ta_ras_gfx_subblock {
     /*CPC*/
     TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
@@ -4004,31 +3994,6 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
         preempt_enable();
         clock = clock_lo | (clock_hi << 32ULL);
         break;
-    case IP_VERSION(9, 1, 0):
-    case IP_VERSION(9, 2, 2):
-        preempt_disable();
-        if (adev->rev_id >= 0x8) {
-            clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2);
-            clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2);
-            hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2);
-        } else {
-            clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven);
-            clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven);
-            hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven);
-        }
-        /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over
-         * roughly every 42 seconds.
-         */
-        if (hi_check != clock_hi) {
-            if (adev->rev_id >= 0x8)
-                clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2);
-            else
-                clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven);
-            clock_hi = hi_check;
-        }
-        preempt_enable();
-        clock = clock_lo | (clock_hi << 32ULL);
-        break;
     default:
         amdgpu_gfx_off_ctrl(adev, false);
         mutex_lock(&adev->gfx.gpu_clock_mutex);

--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -301,10 +301,11 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
     u32 reference_clock = adev->clock.spll.reference_freq;
 
     if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) ||
-        adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1) ||
-        adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
-        adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
+        adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1))
         return 10000;
+    if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
+        adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
+        return reference_clock / 4;
 
     return reference_clock;
 }

--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -542,8 +542,15 @@ static u32 vi_get_xclk(struct amdgpu_device *adev)
     u32 reference_clock = adev->clock.spll.reference_freq;
     u32 tmp;
 
-    if (adev->flags & AMD_IS_APU)
-        return reference_clock;
+    if (adev->flags & AMD_IS_APU) {
+        switch (adev->asic_type) {
+        case CHIP_STONEY:
+            /* vbios says 48Mhz, but the actual freq is 100Mhz */
+            return 10000;
+        default:
+            return reference_clock;
+        }
+    }
 
     tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
     if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
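
amdgpu's get_xclk hooks report the crystal frequency in units of 10 kHz, so the bare 10000 in the Stoney hunk above is 100 MHz. A one-line unit check (standalone, illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned int stoney_xclk = 10000;   /* 10 kHz units */
        printf("Stoney xclk: %u MHz\n", stoney_xclk / 100);
        return 0;
    }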

--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1981,6 +1981,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
     return result;
 }
 
+static bool commit_minimal_transition_state(struct dc *dc,
+        struct dc_state *transition_base_context);
+
 /**
  * dc_commit_streams - Commit current stream state
  *
@@ -2002,6 +2005,8 @@ enum dc_status dc_commit_streams(struct dc *dc,
     struct dc_state *context;
     enum dc_status res = DC_OK;
     struct dc_validation_set set[MAX_STREAMS] = {0};
+    struct pipe_ctx *pipe;
+    bool handle_exit_odm2to1 = false;
 
     if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
         return res;
@@ -2026,6 +2031,22 @@ enum dc_status dc_commit_streams(struct dc *dc,
         }
     }
 
+    /* Check for case where we are going from odm 2:1 to max
+     * pipe scenario. For these cases, we will call
+     * commit_minimal_transition_state() to exit out of odm 2:1
+     * first before processing new streams
+     */
+    if (stream_count == dc->res_pool->pipe_count) {
+        for (i = 0; i < dc->res_pool->pipe_count; i++) {
+            pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+            if (pipe->next_odm_pipe)
+                handle_exit_odm2to1 = true;
+        }
+    }
+
+    if (handle_exit_odm2to1)
+        res = commit_minimal_transition_state(dc, dc->current_state);
+
     context = dc_create_state(dc);
     if (!context)
         goto context_alloc_fail;
@@ -3872,6 +3893,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
     unsigned int i, j;
     unsigned int pipe_in_use = 0;
     bool subvp_in_use = false;
+    bool odm_in_use = false;
 
     if (!transition_context)
         return false;
@@ -3900,6 +3922,18 @@ static bool commit_minimal_transition_state(struct dc *dc,
         }
     }
 
+    /* If ODM is enabled and we are adding or removing planes from any ODM
+     * pipe, we must use the minimal transition.
+     */
+    for (i = 0; i < dc->res_pool->pipe_count; i++) {
+        struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+        if (pipe->stream && pipe->next_odm_pipe) {
+            odm_in_use = true;
+            break;
+        }
+    }
+
     /* When the OS add a new surface if we have been used all of pipes with odm combine
      * and mpc split feature, it need use commit_minimal_transition_state to transition safely.
      * After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need
@@ -3908,7 +3942,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
      * Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially
      * enter/exit MPO when DCN still have enough resources.
      */
-    if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use) {
+    if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) {
         dc_release_state(transition_context);
         return true;
     }

--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1444,6 +1444,26 @@ static int acquire_first_split_pipe(
             split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
             split_pipe->pipe_idx = i;
 
             split_pipe->stream = stream;
             return i;
+        } else if (split_pipe->prev_odm_pipe &&
+                split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) {
+            split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe;
+            if (split_pipe->next_odm_pipe)
+                split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe;
+
+            if (split_pipe->prev_odm_pipe->plane_state)
+                resource_build_scaling_params(split_pipe->prev_odm_pipe);
+
+            memset(split_pipe, 0, sizeof(*split_pipe));
+            split_pipe->stream_res.tg = pool->timing_generators[i];
+            split_pipe->plane_res.hubp = pool->hubps[i];
+            split_pipe->plane_res.ipp = pool->ipps[i];
+            split_pipe->plane_res.dpp = pool->dpps[i];
+            split_pipe->stream_res.opp = pool->opps[i];
+            split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
+            split_pipe->pipe_idx = i;
+
+            split_pipe->stream = stream;
+            return i;
         }

--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -138,7 +138,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
     .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
     .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
     .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
-    .pct_ideal_sdp_bw_after_urgent = 100.0,
+    .pct_ideal_sdp_bw_after_urgent = 90.0,
     .pct_ideal_fabric_bw_after_urgent = 67.0,
    .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
    .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented

--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2067,33 +2067,94 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
     return ret;
 }
 
+static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
+                                                      uint32_t *gen_speed_override,
+                                                      uint32_t *lane_width_override)
+{
+    struct amdgpu_device *adev = smu->adev;
+
+    *gen_speed_override = 0xff;
+    *lane_width_override = 0xff;
+
+    switch (adev->pdev->device) {
+    case 0x73A0:
+    case 0x73A1:
+    case 0x73A2:
+    case 0x73A3:
+    case 0x73AB:
+    case 0x73AE:
+        /* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */
+        *lane_width_override = 6;
+        break;
+    case 0x73E0:
+    case 0x73E1:
+    case 0x73E3:
+        *lane_width_override = 4;
+        break;
+    case 0x7420:
+    case 0x7421:
+    case 0x7422:
+    case 0x7423:
+    case 0x7424:
+        *lane_width_override = 3;
+        break;
+    default:
+        break;
+    }
+}
+
+#define MAX(a, b)   ((a) > (b) ? (a) : (b))
+
 static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
                      uint32_t pcie_gen_cap,
                      uint32_t pcie_width_cap)
 {
     struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
-    uint32_t smu_pcie_arg;
+    struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
+    uint32_t gen_speed_override, lane_width_override;
     uint8_t *table_member1, *table_member2;
+    uint32_t min_gen_speed, max_gen_speed;
+    uint32_t min_lane_width, max_lane_width;
+    uint32_t smu_pcie_arg;
     int ret, i;
 
     GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
     GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
 
-    /* lclk dpm table setup */
-    for (i = 0; i < MAX_PCIE_CONF; i++) {
-        dpm_context->dpm_tables.pcie_table.pcie_gen[i] = table_member1[i];
-        dpm_context->dpm_tables.pcie_table.pcie_lane[i] = table_member2[i];
+    sienna_cichlid_get_override_pcie_settings(smu,
+                                              &gen_speed_override,
+                                              &lane_width_override);
+
+    /* PCIE gen speed override */
+    if (gen_speed_override != 0xff) {
+        min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
+        max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
+    } else {
+        min_gen_speed = MAX(0, table_member1[0]);
+        max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
+        min_gen_speed = min_gen_speed > max_gen_speed ?
+                max_gen_speed : min_gen_speed;
     }
+    pcie_table->pcie_gen[0] = min_gen_speed;
+    pcie_table->pcie_gen[1] = max_gen_speed;
+
+    /* PCIE lane width override */
+    if (lane_width_override != 0xff) {
+        min_lane_width = MIN(pcie_width_cap, lane_width_override);
+        max_lane_width = MIN(pcie_width_cap, lane_width_override);
+    } else {
+        min_lane_width = MAX(1, table_member2[0]);
+        max_lane_width = MIN(pcie_width_cap, table_member2[1]);
+        min_lane_width = min_lane_width > max_lane_width ?
+                 max_lane_width : min_lane_width;
+    }
+    pcie_table->pcie_lane[0] = min_lane_width;
+    pcie_table->pcie_lane[1] = max_lane_width;
 
     for (i = 0; i < NUM_LINK_LEVELS; i++) {
-        smu_pcie_arg = (i << 16) |
-            ((table_member1[i] <= pcie_gen_cap) ?
-             (table_member1[i] << 8) :
-             (pcie_gen_cap << 8)) |
-            ((table_member2[i] <= pcie_width_cap) ?
-             table_member2[i] :
-             pcie_width_cap);
+        smu_pcie_arg = (i << 16 |
+                pcie_table->pcie_gen[i] << 8 |
+                pcie_table->pcie_lane[i]);
 
         ret = smu_cmn_send_smc_msg_with_param(smu,
                 SMU_MSG_OverridePcieParameters,
@@ -2101,11 +2162,6 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
                 NULL);
         if (ret)
             return ret;
-
-        if (table_member1[i] > pcie_gen_cap)
-            dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
-        if (table_member2[i] > pcie_width_cap)
-            dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
     }
 
     return 0;
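
A hedged sketch of the clamping scheme introduced above (standalone C with stand-in values): an override of 0xff means "no override", so the DPM range comes from the pptable clamped to the platform cap; any other value pins both ends of the range to the capped override. Per the comment in the hunk, a lane-width code of 6 corresponds to x16.

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    static void clamp_dpm(unsigned int cap, unsigned int override,
                          unsigned int tbl_min, unsigned int tbl_max,
                          unsigned int *lo, unsigned int *hi)
    {
        if (override != 0xff) {
            *lo = MIN(cap, override);     /* pin both ends */
            *hi = MIN(cap, override);
        } else {
            *lo = MAX(1, tbl_min);
            *hi = MIN(cap, tbl_max);
            if (*lo > *hi)
                *lo = *hi;
        }
    }

    int main(void)
    {
        unsigned int lo, hi;

        clamp_dpm(7, 6, 1, 7, &lo, &hi);      /* override: pinned at x16 */
        printf("pinned:     min=%u max=%u\n", lo, hi);
        clamp_dpm(7, 0xff, 1, 7, &lo, &hi);   /* no override: table range */
        printf("from table: min=%u max=%u\n", lo, hi);
        return 0;
    }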

--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -573,11 +573,11 @@ int smu_v13_0_init_power(struct smu_context *smu)
     if (smu_power->power_context || smu_power->power_context_size != 0)
         return -EINVAL;
 
-    smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
+    smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context),
                        GFP_KERNEL);
     if (!smu_power->power_context)
         return -ENOMEM;
-    smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context);
+    smu_power->power_context_size = sizeof(struct smu_13_0_power_context);
 
     return 0;
 }
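
The bug here is the classic kzalloc(sizeof(struct A)) feeding a pointer used as struct B; nothing fails until a field past sizeof(struct A) is written. A hedged standalone illustration (stand-in structs, not the SMU types):

    #include <stdio.h>
    #include <stdlib.h>

    struct dpm_context   { char state[32]; };
    struct power_context { char state[32]; long long budget[8]; };

    int main(void)
    {
        /* buggy: size taken from the wrong type (under-allocates) */
        struct power_context *bad = calloc(1, sizeof(struct dpm_context));
        /* fixed: size matches the type actually stored */
        struct power_context *good = calloc(1, sizeof(struct power_context));

        printf("allocated %zu bytes, needed %zu\n",
               sizeof(struct dpm_context), sizeof(struct power_context));
        good->budget[7] = 1;   /* in bounds; via 'bad' this would overflow */
        free(bad);
        free(good);
        return 0;
    }

The kernel idiom kzalloc(sizeof(*ptr), ...) sidesteps this class of bug by deriving the size from the destination pointer.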

--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -119,53 +119,32 @@ err_astdp_edid_not_ready:
 /*
  * Launch Aspeed DP
  */
-void ast_dp_launch(struct drm_device *dev, u8 bPower)
+void ast_dp_launch(struct drm_device *dev)
 {
-    u32 i = 0, j = 0, WaitCount = 1;
-    u8 bDPTX = 0;
+    u32 i = 0;
     u8 bDPExecute = 1;
-
     struct ast_device *ast = to_ast_device(dev);
-    // S3 come back, need more time to wait BMC ready.
-    if (bPower)
-        WaitCount = 300;
-
-    // Wait total count by different condition.
-    for (j = 0; j < WaitCount; j++) {
-        bDPTX = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, TX_TYPE_MASK);
-
-        if (bDPTX)
-            break;
 
+    // Wait one second then timeout.
+    while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING) !=
+           ASTDP_MCU_FW_EXECUTING) {
+        i++;
         // wait 100 ms
         msleep(100);
-    }
 
-    // 0xE : ASTDP with DPMCU FW handling
-    if (bDPTX == ASTDP_DPMCU_TX) {
-        // Wait one second then timeout.
-        i = 0;
-
-        while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, COPROCESSOR_LAUNCH) !=
-            COPROCESSOR_LAUNCH) {
-            i++;
-            // wait 100 ms
-            msleep(100);
-
-            if (i >= 10) {
-                // DP would not be ready.
-                bDPExecute = 0;
-                break;
-            }
+        if (i >= 10) {
+            // DP would not be ready.
+            bDPExecute = 0;
+            break;
         }
+    }
 
-        if (bDPExecute)
-            ast->tx_chip_types |= BIT(AST_TX_ASTDP);
+    if (!bDPExecute)
+        drm_err(dev, "Wait DPMCU executing timeout\n");
 
-        ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
-                    (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
-                    ASTDP_HOST_EDID_READ_DONE);
-    }
+    ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
+                   (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
+                   ASTDP_HOST_EDID_READ_DONE);
 }

--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -350,9 +350,6 @@ int ast_mode_config_init(struct ast_device *ast);
 #define AST_DP501_LINKRATE  0xf014
 #define AST_DP501_EDID_DATA 0xf020
 
-/* Define for Soc scratched reg */
-#define COPROCESSOR_LAUNCH  BIT(5)
-
 /*
  * Display Transmitter Type:
  */
@@ -480,7 +477,7 @@ struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
 
 /* aspeed DP */
 int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
-void ast_dp_launch(struct drm_device *dev, u8 bPower);
+void ast_dp_launch(struct drm_device *dev);
 void ast_dp_power_on_off(struct drm_device *dev, bool no);
 void ast_dp_set_on_off(struct drm_device *dev, bool no);
 void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode);

--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -254,8 +254,13 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
         case 0x0c:
             ast->tx_chip_types = AST_TX_DP501_BIT;
         }
-    } else if (ast->chip == AST2600)
-        ast_dp_launch(&ast->base, 0);
+    } else if (ast->chip == AST2600) {
+        if (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, TX_TYPE_MASK) ==
+            ASTDP_DPMCU_TX) {
+            ast->tx_chip_types = AST_TX_ASTDP_BIT;
+            ast_dp_launch(&ast->base);
+        }
+    }
 
     /* Print stuff for diagnostic purposes */
     if (ast->tx_chip_types & AST_TX_NONE_BIT)
@@ -264,6 +269,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
         drm_info(dev, "Using Sil164 TMDS transmitter\n");
     if (ast->tx_chip_types & AST_TX_DP501_BIT)
         drm_info(dev, "Using DP501 DisplayPort transmitter\n");
+    if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
+        drm_info(dev, "Using ASPEED DisplayPort transmitter\n");
 
     return 0;
 }

--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -1647,6 +1647,8 @@ static int ast_dp501_output_init(struct ast_device *ast)
 static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
 {
     void *edid;
+    struct drm_device *dev = connector->dev;
+    struct ast_device *ast = to_ast_device(dev);
 
     int succ;
     int count;
@@ -1655,9 +1657,17 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
     if (!edid)
         goto err_drm_connector_update_edid_property;
 
+    /*
+     * Protect access to I/O registers from concurrent modesetting
+     * by acquiring the I/O-register lock.
+     */
+    mutex_lock(&ast->ioregs_lock);
+
     succ = ast_astdp_read_edid(connector->dev, edid);
     if (succ < 0)
-        goto err_kfree;
+        goto err_mutex_unlock;
+
+    mutex_unlock(&ast->ioregs_lock);
 
     drm_connector_update_edid_property(connector, edid);
     count = drm_add_edid_modes(connector, edid);
@@ -1665,7 +1675,8 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
 
     return count;
 
-err_kfree:
+err_mutex_unlock:
+    mutex_unlock(&ast->ioregs_lock);
     kfree(edid);
 err_drm_connector_update_edid_property:
     drm_connector_update_edid_property(connector, NULL);

--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -380,7 +380,8 @@ void ast_post_gpu(struct drm_device *dev)
     ast_set_def_ext_reg(dev);
 
     if (ast->chip == AST2600) {
-        ast_dp_launch(dev, 1);
+        if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
+            ast_dp_launch(dev);
     } else if (ast->config_mode == ast_use_p2a) {
         if (ast->chip == AST2500)
             ast_post_chip_2500(dev);

--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1545,17 +1545,19 @@ static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
     }
 }
 
-static void __fill_var(struct fb_var_screeninfo *var,
+static void __fill_var(struct fb_var_screeninfo *var, struct fb_info *info,
                struct drm_framebuffer *fb)
 {
     int i;
 
     var->xres_virtual = fb->width;
     var->yres_virtual = fb->height;
-    var->accel_flags = FB_ACCELF_TEXT;
+    var->accel_flags = 0;
     var->bits_per_pixel = drm_format_info_bpp(fb->format, 0);
 
-    var->height = var->width = 0;
+    var->height = info->var.height;
+    var->width = info->var.width;
+
     var->left_margin = var->right_margin = 0;
     var->upper_margin = var->lower_margin = 0;
     var->hsync_len = var->vsync_len = 0;
@@ -1618,7 +1620,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
         return -EINVAL;
     }
 
-    __fill_var(var, fb);
+    __fill_var(var, info, fb);
 
     /*
      * fb_pan_display() validates this, but fb_set_par() doesn't and just
@@ -2074,7 +2076,7 @@ static void drm_fb_helper_fill_var(struct fb_info *info,
     info->pseudo_palette = fb_helper->pseudo_palette;
     info->var.xoffset = 0;
     info->var.yoffset = 0;
-    __fill_var(&info->var, fb);
+    __fill_var(&info->var, info, fb);
     info->var.activate = FB_ACTIVATE_NOW;
 
     drm_fb_helper_fill_pixel_fmt(&info->var, format);

--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1335,7 +1335,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
     /* Let the runqueue know that there is work to do. */
     queue_work(g2d->g2d_workq, &g2d->runqueue_work);
 
-    if (runqueue_node->async)
+    if (req->async)
         goto out;
 
     wait_for_completion(&runqueue_node->complete);
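
Why reading req->async fixes the race: queue_work() hands runqueue_node to the worker, which may complete and free it before the submitter reads node->async; the request struct is still owned by the caller, so its copy of the flag is safe. A hedged userspace analogue (pthreads, stand-in types):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { bool async; };

    /* The consumer may free the node at any point after handoff. */
    static void *worker(void *p)
    {
        free(p);
        return NULL;
    }

    int main(void)
    {
        bool req_async = true;              /* caller-owned, like req->async */
        struct node *n = malloc(sizeof(*n));
        n->async = req_async;

        pthread_t t;
        pthread_create(&t, NULL, worker, n);

        /* Reading n->async here would race with free() in the worker;
         * the caller-owned copy does not. */
        if (!req_async)
            puts("would wait for completion");
        pthread_join(t, NULL);
        return 0;
    }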

--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -469,8 +469,6 @@ static int vidi_remove(struct platform_device *pdev)
     if (ctx->raw_edid != (struct edid *)fake_edid_info) {
         kfree(ctx->raw_edid);
         ctx->raw_edid = NULL;
-
-        return -EINVAL;
     }
 
     component_del(&pdev->dev, &vidi_component_ops);

--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1453,6 +1453,18 @@ static u8 tgl_calc_voltage_level(int cdclk)
         return 0;
 }
 
+static u8 rplu_calc_voltage_level(int cdclk)
+{
+    if (cdclk > 556800)
+        return 3;
+    else if (cdclk > 480000)
+        return 2;
+    else if (cdclk > 312000)
+        return 1;
+    else
+        return 0;
+}
+
 static void icl_readout_refclk(struct drm_i915_private *dev_priv,
                    struct intel_cdclk_config *cdclk_config)
 {
@@ -3242,6 +3254,13 @@ static const struct intel_cdclk_funcs mtl_cdclk_funcs = {
     .calc_voltage_level = tgl_calc_voltage_level,
 };
 
+static const struct intel_cdclk_funcs rplu_cdclk_funcs = {
+    .get_cdclk = bxt_get_cdclk,
+    .set_cdclk = bxt_set_cdclk,
+    .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+    .calc_voltage_level = rplu_calc_voltage_level,
+};
+
 static const struct intel_cdclk_funcs tgl_cdclk_funcs = {
     .get_cdclk = bxt_get_cdclk,
     .set_cdclk = bxt_set_cdclk,
@@ -3384,14 +3403,17 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
         dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
         dev_priv->display.cdclk.table = dg2_cdclk_table;
     } else if (IS_ALDERLAKE_P(dev_priv)) {
-        dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
         /* Wa_22011320316:adl-p[a0] */
-        if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+        if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
             dev_priv->display.cdclk.table = adlp_a_step_cdclk_table;
-        else if (IS_ADLP_RPLU(dev_priv))
+            dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+        } else if (IS_ADLP_RPLU(dev_priv)) {
             dev_priv->display.cdclk.table = rplu_cdclk_table;
-        else
+            dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
+        } else {
             dev_priv->display.cdclk.table = adlp_cdclk_table;
+            dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+        }
     } else if (IS_ROCKETLAKE(dev_priv)) {
         dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
         dev_priv->display.cdclk.table = rkl_cdclk_table;

--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -129,7 +129,7 @@ static int intel_dp_aux_sync_len(void)
 
 static int intel_dp_aux_fw_sync_len(void)
 {
-    int precharge = 16; /* 10-16 */
+    int precharge = 10; /* 10-16 */
     int preamble = 8;
 
     return precharge + preamble;
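
For scale: with the 8-pulse preamble fixed, dropping precharge from the top of the allowed 10-16 range to the bottom shortens the wake sync pulse from 24 to 18 pulses. Trivial arithmetic check (standalone):

    #include <stdio.h>

    static int fw_sync_len(int precharge) { return precharge + 8; }

    int main(void)
    {
        printf("old: %d pulses, new: %d pulses\n",
               fw_sync_len(16), fw_sync_len(10));
        return 0;
    }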

--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -346,8 +346,10 @@ static int live_parallel_switch(void *arg)
                 continue;
 
             ce = intel_context_create(data[m].ce[0]->engine);
-            if (IS_ERR(ce))
+            if (IS_ERR(ce)) {
+                err = PTR_ERR(ce);
                 goto out;
+            }
 
             err = intel_context_pin(ce);
             if (err) {
@@ -367,8 +369,10 @@ static int live_parallel_switch(void *arg)
 
         worker = kthread_create_worker(0, "igt/parallel:%s",
                            data[n].ce[0]->engine->name);
-        if (IS_ERR(worker))
+        if (IS_ERR(worker)) {
+            err = PTR_ERR(worker);
             goto out;
+        }
 
         data[n].worker = worker;
     }
@@ -397,8 +401,10 @@ static int live_parallel_switch(void *arg)
             }
         }
 
-        if (igt_live_test_end(&t))
-            err = -EIO;
+        if (igt_live_test_end(&t)) {
+            err = err ?: -EIO;
+            break;
+        }
     }
 
 out:

--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -1530,8 +1530,8 @@ static int live_busywait_preempt(void *arg)
     struct drm_i915_gem_object *obj;
     struct i915_vma *vma;
     enum intel_engine_id id;
-    int err = -ENOMEM;
     u32 *map;
+    int err;
 
     /*
      * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
@@ -1539,13 +1539,17 @@ static int live_busywait_preempt(void *arg)
      */
 
     ctx_hi = kernel_context(gt->i915, NULL);
-    if (!ctx_hi)
-        return -ENOMEM;
+    if (IS_ERR(ctx_hi))
+        return PTR_ERR(ctx_hi);
+
     ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
 
     ctx_lo = kernel_context(gt->i915, NULL);
-    if (!ctx_lo)
+    if (IS_ERR(ctx_lo)) {
+        err = PTR_ERR(ctx_lo);
         goto err_ctx_hi;
+    }
+
     ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
 
     obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
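
Both selftest fixes hinge on kernel_context() returning an ERR_PTR-encoded errno rather than NULL, so a NULL check silently passes on failure. A hedged userspace re-implementation of the convention (the three helpers mirror the kernel's, simplified):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
        void *ctx = ERR_PTR(-12);   /* a failed allocation, e.g. -ENOMEM */

        if (ctx != NULL)
            puts("NULL check passes: the old code missed this error");
        if (IS_ERR(ctx))
            printf("IS_ERR catches it: err = %ld\n", PTR_ERR(ctx));
        return 0;
    }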

--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -165,7 +165,7 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe,
 void lima_sched_context_fini(struct lima_sched_pipe *pipe,
                  struct lima_sched_context *context)
 {
-    drm_sched_entity_fini(&context->base);
+    drm_sched_entity_destroy(&context->base);
 }
 
 struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task)

--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1526,8 +1526,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
     if (!pdev)
         return -ENODEV;
 
-    mutex_init(&gmu->lock);
-
     gmu->dev = &pdev->dev;
 
     of_dma_configure(gmu->dev, node, true);

--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1981,6 +1981,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
     adreno_gpu = &a6xx_gpu->base;
     gpu = &adreno_gpu->base;
 
+    mutex_init(&a6xx_gpu->gmu.lock);
+
     adreno_gpu->registers = NULL;
 
     /*
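
The two GMU hunks move mutex_init() from a6xx_gmu_init() to a6xx_gpu_init(): the lock must exist before any code path can take it, and some probe/error paths touch the GMU before its own init runs. Minimal pthread analogue of the rule (stand-in struct):

    #include <pthread.h>
    #include <stdio.h>

    struct gmu { pthread_mutex_t lock; /* ...hw state... */ };

    int main(void)
    {
        struct gmu gmu;

        /* initialize at the earliest owner, not in late device setup */
        pthread_mutex_init(&gmu.lock, NULL);

        /* any path may now lock safely, even before full GMU setup */
        pthread_mutex_lock(&gmu.lock);
        puts("GMU lock usable");
        pthread_mutex_unlock(&gmu.lock);
        pthread_mutex_destroy(&gmu.lock);
        return 0;
    }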

--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -620,7 +620,7 @@ void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
             config & DP_DP_HPD_INT_MASK);
 }
 
-void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog)
+void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog)
 {
     struct dp_catalog_private *catalog = container_of(dp_catalog,
                 struct dp_catalog_private, dp_catalog);
@@ -635,6 +635,19 @@ void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog)
     dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
 }
 
+void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog)
+{
+    struct dp_catalog_private *catalog = container_of(dp_catalog,
+                struct dp_catalog_private, dp_catalog);
+
+    u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
+
+    reftimer &= ~DP_DP_HPD_REFTIMER_ENABLE;
+    dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
+
+    dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0);
+}
+
 static void dp_catalog_enable_sdp(struct dp_catalog_private *catalog)
 {
     /* trigger sdp */

--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -104,7 +104,8 @@ bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog);
 void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable);
 void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
             u32 intr_mask, bool en);
-void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog);
 void dp_catalog_ctrl_config_psr(struct dp_catalog *dp_catalog);
 void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter);
 u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog);

--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -28,6 +28,10 @@
 #include "dp_audio.h"
 #include "dp_debug.h"
 
+static bool psr_enabled = false;
+module_param(psr_enabled, bool, 0);
+MODULE_PARM_DESC(psr_enabled, "enable PSR for eDP and DP displays");
+
 #define HPD_STRING_SIZE 30
 
 enum {
@@ -407,7 +411,7 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
 
     edid = dp->panel->edid;
 
-    dp->dp_display.psr_supported = dp->panel->psr_cap.version;
+    dp->dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled;
 
     dp->audio_supported = drm_detect_monitor_audio(edid);
     dp_panel_handle_sink_request(dp->panel);
@@ -616,12 +620,6 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
         dp->hpd_state = ST_MAINLINK_READY;
     }
 
-    /* enable HDP irq_hpd/replug interrupt */
-    if (dp->dp_display.internal_hpd)
-        dp_catalog_hpd_config_intr(dp->catalog,
-                       DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK,
-                       true);
-
     drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
             dp->dp_display.connector_type, state);
     mutex_unlock(&dp->event_mutex);
@@ -659,12 +657,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
     drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
             dp->dp_display.connector_type, state);
 
-    /* disable irq_hpd/replug interrupts */
-    if (dp->dp_display.internal_hpd)
-        dp_catalog_hpd_config_intr(dp->catalog,
-                       DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK,
-                       false);
-
     /* unplugged, no more irq_hpd handle */
     dp_del_event(dp, EV_IRQ_HPD_INT);
 
@@ -688,10 +680,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
         return 0;
     }
 
-    /* disable HPD plug interrupts */
-    if (dp->dp_display.internal_hpd)
-        dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, false);
-
     /*
      * We don't need separate work for disconnect as
      * connect/attention interrupts are disabled
@@ -707,10 +695,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
     /* signal the disconnect event early to ensure proper teardown */
     dp_display_handle_plugged_change(&dp->dp_display, false);
 
-    /* enable HDP plug interrupt to prepare for next plugin */
-    if (dp->dp_display.internal_hpd)
-        dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, true);
-
     drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
             dp->dp_display.connector_type, state);
 
@@ -1083,26 +1067,6 @@ void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
     mutex_unlock(&dp_display->event_mutex);
 }
 
-static void dp_display_config_hpd(struct dp_display_private *dp)
-{
-
-    dp_display_host_init(dp);
-    dp_catalog_ctrl_hpd_config(dp->catalog);
-
-    /* Enable plug and unplug interrupts only if requested */
-    if (dp->dp_display.internal_hpd)
-        dp_catalog_hpd_config_intr(dp->catalog,
-                DP_DP_HPD_PLUG_INT_MASK |
-                DP_DP_HPD_UNPLUG_INT_MASK,
-                true);
-
-    /* Enable interrupt first time
-     * we are leaving dp clocks on during disconnect
-     * and never disable interrupt
-     */
-    enable_irq(dp->irq);
-}
-
 void dp_display_set_psr(struct msm_dp *dp_display, bool enter)
 {
     struct dp_display_private *dp;
@@ -1177,7 +1141,7 @@ static int hpd_event_thread(void *data)
 
         switch (todo->event_id) {
         case EV_HPD_INIT_SETUP:
-            dp_display_config_hpd(dp_priv);
+            dp_display_host_init(dp_priv);
             break;
         case EV_HPD_PLUG_INT:
             dp_hpd_plug_handle(dp_priv, todo->data);
@@ -1283,7 +1247,6 @@ int dp_display_request_irq(struct msm_dp *dp_display)
             dp->irq, rc);
         return rc;
     }
-    disable_irq(dp->irq);
 
     return 0;
 }
@@ -1395,13 +1358,8 @@ static int dp_pm_resume(struct device *dev)
     /* turn on dp ctrl/phy */
     dp_display_host_init(dp);
 
-    dp_catalog_ctrl_hpd_config(dp->catalog);
-
-    if (dp->dp_display.internal_hpd)
-        dp_catalog_hpd_config_intr(dp->catalog,
-                DP_DP_HPD_PLUG_INT_MASK |
-                DP_DP_HPD_UNPLUG_INT_MASK,
-                true);
+    if (dp_display->is_edp)
+        dp_catalog_ctrl_hpd_enable(dp->catalog);
 
     if (dp_catalog_link_is_connected(dp->catalog)) {
         /*
@@ -1569,9 +1527,8 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
     if (aux_bus && dp->is_edp) {
         dp_display_host_init(dp_priv);
-        dp_catalog_ctrl_hpd_config(dp_priv->catalog);
+        dp_catalog_ctrl_hpd_enable(dp_priv->catalog);
         dp_display_host_phy_init(dp_priv);
-        enable_irq(dp_priv->irq);
 
         /*
          * The code below assumes that the panel will finish probing
@@ -1613,7 +1570,6 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
 error:
     if (dp->is_edp) {
-        disable_irq(dp_priv->irq);
         dp_display_host_phy_exit(dp_priv);
         dp_display_host_deinit(dp_priv);
     }
@@ -1802,16 +1758,31 @@ void dp_bridge_hpd_enable(struct drm_bridge *bridge)
 {
     struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
     struct msm_dp *dp_display = dp_bridge->dp_display;
+    struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+    mutex_lock(&dp->event_mutex);
+    dp_catalog_ctrl_hpd_enable(dp->catalog);
+
+    /* enable HDP interrupts */
+    dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true);
 
     dp_display->internal_hpd = true;
+    mutex_unlock(&dp->event_mutex);
 }
 
 void dp_bridge_hpd_disable(struct drm_bridge *bridge)
 {
     struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
     struct msm_dp *dp_display = dp_bridge->dp_display;
+    struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+    mutex_lock(&dp->event_mutex);
+    /* disable HDP interrupts */
+    dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
+    dp_catalog_ctrl_hpd_disable(dp->catalog);
 
     dp_display->internal_hpd = false;
+    mutex_unlock(&dp->event_mutex);
 }
 
 void dp_bridge_hpd_notify(struct drm_bridge *bridge,
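
Since psr_enabled defaults to false, PSR now stays off unless the user opts back in; with msm built into the kernel that would presumably be the boot parameter msm.psr_enabled=1, or psr_enabled=1 when loading the module. The parameter name and permissions are exactly as declared by the module_param() line in the hunk above.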

--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -449,6 +449,8 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
     if (ret)
         goto err_cleanup_mode_config;
 
+    dma_set_max_seg_size(dev, UINT_MAX);
+
     /* Bind all our sub-components: */
     ret = component_bind_all(dev, ddev);
     if (ret)
@@ -459,8 +461,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
     if (ret)
         goto err_msm_uninit;
 
-    dma_set_max_seg_size(dev, UINT_MAX);
-
     msm_gem_shrinker_init(ddev);
 
     if (priv->kms_init) {

--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -459,7 +459,6 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
     struct radeon_device *rdev = dev->dev_private;
     struct drm_radeon_gem_set_domain *args = data;
     struct drm_gem_object *gobj;
-    struct radeon_bo *robj;
     int r;
 
     /* for now if someone requests domain CPU -
@@ -472,13 +471,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
         up_read(&rdev->exclusive_lock);
         return -ENOENT;
     }
-    robj = gem_to_radeon_bo(gobj);
 
     r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
 
     drm_gem_object_put(gobj);
     up_read(&rdev->exclusive_lock);
-    r = radeon_gem_handle_lockup(robj->rdev, r);
+    r = radeon_gem_handle_lockup(rdev, r);
     return r;
 }
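
The radeon fix is the same use-after-free family as the exynos one: drm_gem_object_put() can drop the last reference, after which robj->rdev is a dangling read, while the ioctl already holds rdev from dev->dev_private. A hedged standalone sketch (stand-in types):

    #include <stdio.h>
    #include <stdlib.h>

    struct device { int wedged; };
    struct bo { struct device *rdev; };

    /* may be the final unref, freeing the object */
    static void put(struct bo *b) { free(b); }

    int main(void)
    {
        struct device rdev = { .wedged = 0 };
        struct bo *robj = malloc(sizeof(*robj));
        robj->rdev = &rdev;

        put(robj);
        /* robj->rdev would now read freed memory; the device pointer
         * saved before the put stays valid */
        printf("handle_lockup(rdev): wedged=%d\n", rdev.wedged);
        return 0;
    }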