drm fixes for 6.9-rc4

client:
 - Protect connector modes with mode_config mutex
 
 ast:
 - Fix soft lockup
 
 host1x:
 - Do not setup DMA for virtual addresses
 
 ivpu:
 - Fix deadlock in context_xa
 - PCI fixes
 - Fixes to error handling
 
 nouveau:
 - gsp: Fix OOB access
 - Fix casting
 
 panfrost:
 - Fix error path in MMU code
 
 qxl:
 - Revert "drm/qxl: simplify qxl_fence_wait"
 
 vmwgfx:
 - Enable DMA for SEV mappings
 
 i915:
 - Couple CDCLK programming fixes
 - HDCP related fix
 - 4 Bigjoiner related fixes
 - Fix for a circular locking around GuC on reset+wedged case
 
 xe:
 - Fix double display mutex initializations
 - Fix u32 -> u64 implicit conversions
 - Fix RING_CONTEXT_CONTROL not marked as masked
 
 msm:
 - DP refcount leak fix on disconnect
 - Add missing newlines to prints in msm_fb and msm_kms
 - fix dpu debugfs entry permissions
 - Fix the interface table for the catalog of X1E80100
 - fix irq message printing
 - Bindings fix to add DP node as child of mdss for mdss node
 - Minor typo fix in DP driver API which handles port status change
 - fix CHRASHDUMP_READ()
 - fix HBB (highest bank bit) for a619 to fix UBWC corruption
 
 amdgpu:
 - GPU reset fixes
 - Fix some confusing logging
 - UMSCH fix
 - Aborted suspend fix
 - DCN 3.5 fixes
 - S4 fix
 - MES logging fixes
 - SMU 14 fixes
 - SDMA 4.4.2 fix
 - KASAN fix
 - SMU 13.0.10 fix
 - VCN partition fix
 - GFX11 fixes
 - DWB fixes
 - Plane handling fix
 - FAMS fix
 - DCN 3.1.6 fix
 - VSC SDP fixes
 - OLED panel fix
 - GFX 11.5 fix
 
 amdkfd:
 - GPU reset fixes
 - fix ioctl integer overflow
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmYYjYoACgkQDHTzWXnE
 hr5+AhAAqM5ZXNEC5Y5zbxqLmMyr08sn4lm/fE0CNZYPoRXerBJIOTXvc8V9YL9W
 5Nxux+5qLzLrIOKdY5PIGEyq0TlA53ZyR0GSF3NfFDxI2OiBRbuRpS6obuimPRY6
 bHdfb89S3TRj3XtP7eJs3fuejygAzLDg+4AXIEF9htlXVARrEBVA2Cw1Km0goWoa
 uXhfLlt9tycIdtuedpPDnetZGlffZa1eCFqFlDJg671moEdQDfdSSsgVtkaDkAy1
 Cy1jdu4+9WHTNAS6kGHy8iB7Ebq+n6UKiBr3o9GoWTIgqVz33FeMU1zrJ2IdwNNK
 nuPBeyk7Z125VEntyj+29GdLRcKv+K75zbdoKrifYp5/937DBjpiJ5bcqOnZdDVG
 hc6IPeMWcvLJ10W+ZrtUqs7BA2cS2dO06uBKEHqNL5Th/TmOSpyKeS/JOtU9Tpkq
 b2OkGy/H+20wXFBFmanLlPuS0lrvuUkLi7bIvtqZq2e5vOHbm4xKLrCDPWFtp1sR
 ohkDtqmkGocQz5l0Ublqdcnrtttg3+Rr5Jh+cCkq5f9gEqsbQWEoPzG9dyZOOBq2
 xEtWf5enH+3J711/9FB8+aWD+j7T04a7ZEODeDDAQBwgW6gMYeGHQPKvTWC0xkYX
 8LBYvxfV1TSK+4l3geF9m+MQuZnuX1sZ5QSh7b/nfBvxMaFVJ84=
 =tDFk
 -----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2024-04-12' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Looks like everyone woke up after holidays, this weeks pull has a
  bunch of stuff all over, 2 weeks worth of amdgpu is a lot of it, then
  i915/xe have a few, a bunch of msm fixes, then some scattered driver
  fixes.

  I expect things will settle down for rc5.

  client:
   - Protect connector modes with mode_config mutex

  ast:
   - Fix soft lockup

  host1x:
   - Do not setup DMA for virtual addresses

  ivpu:
   - Fix deadlock in context_xa
   - PCI fixes
   - Fixes to error handling

  nouveau:
   - gsp: Fix OOB access
   - Fix casting

  panfrost:
   - Fix error path in MMU code

  qxl:
   - Revert "drm/qxl: simplify qxl_fence_wait"

  vmwgfx:
   - Enable DMA for SEV mappings

  i915:
   - Couple CDCLK programming fixes
   - HDCP related fix
   - 4 Bigjoiner related fixes
   - Fix for a circular locking around GuC on reset+wedged case

  xe:
   - Fix double display mutex initializations
   - Fix u32 -> u64 implicit conversions
   - Fix RING_CONTEXT_CONTROL not marked as masked

  msm:
   - DP refcount leak fix on disconnect
   - Add missing newlines to prints in msm_fb and msm_kms
   - fix dpu debugfs entry permissions
   - Fix the interface table for the catalog of X1E80100
   - fix irq message printing
   - Bindings fix to add DP node as child of mdss for mdss node
   - Minor typo fix in DP driver API which handles port status change
   - fix CHRASHDUMP_READ()
   - fix HBB (highest bank bit) for a619 to fix UBWC corruption

  amdgpu:
   - GPU reset fixes
   - Fix some confusing logging
   - UMSCH fix
   - Aborted suspend fix
   - DCN 3.5 fixes
   - S4 fix
   - MES logging fixes
   - SMU 14 fixes
   - SDMA 4.4.2 fix
   - KASAN fix
   - SMU 13.0.10 fix
   - VCN partition fix
   - GFX11 fixes
   - DWB fixes
   - Plane handling fix
   - FAMS fix
   - DCN 3.1.6 fix
   - VSC SDP fixes
   - OLED panel fix
   - GFX 11.5 fix

  amdkfd:
   - GPU reset fixes
   - fix ioctl integer overflow"

* tag 'drm-fixes-2024-04-12' of https://gitlab.freedesktop.org/drm/kernel: (65 commits)
  amdkfd: use calloc instead of kzalloc to avoid integer overflow
  drm/xe: Label RING_CONTEXT_CONTROL as masked
  drm/xe/xe_migrate: Cast to output precision before multiplying operands
  drm/xe/hwmon: Cast result to output precision on left shift of operand
  drm/xe/display: Fix double mutex initialization
  drm/amdgpu: differentiate external rev id for gfx 11.5.0
  drm/amd/display: Adjust dprefclk by down spread percentage.
  drm/amd/display: Set VSC SDP Colorimetry same way for MST and SST
  drm/amd/display: Program VSC SDP colorimetry for all DP sinks >= 1.4
  drm/amd/display: fix disable otg wa logic in DCN316
  drm/amd/display: Do not recursively call manual trigger programming
  drm/amd/display: always reset ODM mode in context when adding first plane
  drm/amdgpu: fix incorrect number of active RBs for gfx11
  drm/amd/display: Return max resolution supported by DWB
  amd/amdkfd: sync all devices to wait all processes being evicted
  drm/amdgpu: clear set_q_mode_offs when VM changed
  drm/amdgpu: Fix VCN allocation in CPX partition
  drm/amd/pm: fix the high voltage issue after unload
  drm/amd/display: Skip on writeback when it's not applicable
  drm/amdgpu: implement IRQ_STATE_ENABLE for SDMA v4.4.2
  ...
commit d1c13e8004 by Linus Torvalds, 2024-04-12 08:27:09 -07:00
73 changed files with 931 additions and 267 deletions

@ -53,6 +53,15 @@ patternProperties:
compatible:
const: qcom,sm8150-dpu
"^displayport-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
contains:
const: qcom,sm8150-dp
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/firmware.h>
@ -131,22 +131,6 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param
return 0;
}
static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
{
int ret;
ret = ivpu_rpm_get_if_active(vdev);
if (ret < 0)
return ret;
*clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;
if (ret)
ivpu_rpm_put(vdev);
return 0;
}
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
@ -170,7 +154,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->platform;
break;
case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
ret = ivpu_get_core_clock_rate(vdev, &args->value);
args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
break;
case DRM_IVPU_PARAM_NUM_CONTEXTS:
args->value = ivpu_get_context_count(vdev);
@ -387,12 +371,15 @@ int ivpu_shutdown(struct ivpu_device *vdev)
{
int ret;
ivpu_prepare_for_reset(vdev);
/* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
pci_save_state(to_pci_dev(vdev->drm.dev));
ret = ivpu_hw_power_down(vdev);
if (ret)
ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
return ret;
}
@ -530,7 +517,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
atomic64_set(&vdev->unique_id_counter, 0);
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
@ -560,11 +547,11 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
/* Power up early so the rest of init code can access VPU registers */
ret = ivpu_hw_power_up(vdev);
if (ret)
goto err_power_down;
goto err_shutdown;
ret = ivpu_mmu_global_context_init(vdev);
if (ret)
goto err_power_down;
goto err_shutdown;
ret = ivpu_mmu_init(vdev);
if (ret)
@ -601,10 +588,8 @@ err_mmu_rctx_fini:
ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
ivpu_mmu_global_context_fini(vdev);
err_power_down:
ivpu_hw_power_down(vdev);
if (IVPU_WA(d3hot_after_power_off))
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
err_shutdown:
ivpu_shutdown(vdev);
err_xa_destroy:
xa_destroy(&vdev->db_xa);
xa_destroy(&vdev->submitted_jobs_xa);
@ -628,9 +613,8 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
ivpu_pm_disable(vdev);
ivpu_prepare_for_reset(vdev);
ivpu_shutdown(vdev);
if (IVPU_WA(d3hot_after_power_off))
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
ivpu_jobs_abort_all(vdev);
ivpu_job_done_consumer_fini(vdev);


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_DRV_H__
@ -90,7 +90,6 @@
struct ivpu_wa_table {
bool punit_disabled;
bool clear_runtime_mem;
bool d3hot_after_power_off;
bool interrupt_clear_with_0;
bool disable_clock_relinquish;
bool disable_d0i3_msg;


@ -21,6 +21,7 @@ struct ivpu_hw_ops {
u32 (*profiling_freq_get)(struct ivpu_device *vdev);
void (*profiling_freq_drive)(struct ivpu_device *vdev, bool enable);
u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
u32 (*ratio_to_freq)(struct ivpu_device *vdev, u32 ratio);
u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev);
@ -130,6 +131,11 @@ static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev)
return vdev->hw->ops->reg_pll_freq_get(vdev);
};
static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
return vdev->hw->ops->ratio_to_freq(vdev, ratio);
}
static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return vdev->hw->ops->reg_telemetry_offset_get(vdev);


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/
#include "ivpu_drv.h"
@ -75,7 +75,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
{
vdev->wa.punit_disabled = false;
vdev->wa.clear_runtime_mem = false;
vdev->wa.d3hot_after_power_off = true;
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK);
if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) {
@ -86,7 +85,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
IVPU_PRINT_WA(punit_disabled);
IVPU_PRINT_WA(clear_runtime_mem);
IVPU_PRINT_WA(d3hot_after_power_off);
IVPU_PRINT_WA(interrupt_clear_with_0);
}
@ -805,12 +803,12 @@ static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool ena
/* Profiling freq - is a debug feature. Unavailable on VPU 37XX. */
}
static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
static u32 ivpu_hw_37xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
u32 cpu_clock;
if ((config & 0xff) == PLL_RATIO_4_3)
if ((vdev->hw->config & 0xff) == PLL_RATIO_4_3)
cpu_clock = pll_clock * 2 / 4;
else
cpu_clock = pll_clock * 2 / 5;
@ -829,7 +827,7 @@ static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
if (!ivpu_is_silicon(vdev))
return PLL_SIMULATION_FREQ;
return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
return ivpu_hw_37xx_ratio_to_freq(vdev, pll_curr_ratio);
}
static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
@ -1052,6 +1050,7 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
.profiling_freq_get = ivpu_hw_37xx_profiling_freq_get,
.profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive,
.reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
.ratio_to_freq = ivpu_hw_37xx_ratio_to_freq,
.reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,


@ -980,6 +980,11 @@ static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
return PLL_RATIO_TO_FREQ(pll_curr_ratio);
}
static u32 ivpu_hw_40xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
return PLL_RATIO_TO_FREQ(ratio);
}
static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
@ -1230,6 +1235,7 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
.profiling_freq_get = ivpu_hw_40xx_profiling_freq_get,
.profiling_freq_drive = ivpu_hw_40xx_profiling_freq_drive,
.reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
.ratio_to_freq = ivpu_hw_40xx_ratio_to_freq,
.reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/genalloc.h>
@ -501,7 +501,11 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
spin_lock_init(&ipc->cons_lock);
INIT_LIST_HEAD(&ipc->cons_list);
INIT_LIST_HEAD(&ipc->cb_msg_list);
drmm_mutex_init(&vdev->drm, &ipc->lock);
ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
if (ret) {
ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
goto err_free_rx;
}
ivpu_ipc_reset(vdev);
return 0;


@ -278,7 +278,7 @@ static const char *ivpu_mmu_event_to_str(u32 cmd)
case IVPU_MMU_EVT_F_VMS_FETCH:
return "Fetch of VMS caused external abort";
default:
return "Unknown CMDQ command";
return "Unknown event";
}
}
@ -286,15 +286,15 @@ static const char *ivpu_mmu_cmdq_err_to_str(u32 err)
{
switch (err) {
case IVPU_MMU_CERROR_NONE:
return "No CMDQ Error";
return "No error";
case IVPU_MMU_CERROR_ILL:
return "Illegal command";
case IVPU_MMU_CERROR_ABT:
return "External abort on CMDQ read";
return "External abort on command queue read";
case IVPU_MMU_CERROR_ATC_INV_SYNC:
return "Sync failed to complete ATS invalidation";
default:
return "Unknown CMDQ Error";
return "Unknown error";
}
}


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/highmem.h>
@ -58,14 +58,11 @@ static int ivpu_suspend(struct ivpu_device *vdev)
{
int ret;
/* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
pci_save_state(to_pci_dev(vdev->drm.dev));
ivpu_prepare_for_reset(vdev);
ret = ivpu_shutdown(vdev);
if (ret)
ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
ivpu_err(vdev, "Failed to shutdown NPU: %d\n", ret);
return ret;
}
@ -74,10 +71,10 @@ static int ivpu_resume(struct ivpu_device *vdev)
{
int ret;
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
pci_restore_state(to_pci_dev(vdev->drm.dev));
retry:
pci_restore_state(to_pci_dev(vdev->drm.dev));
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
ret = ivpu_hw_power_up(vdev);
if (ret) {
ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
@ -100,6 +97,7 @@ err_mmu_disable:
ivpu_mmu_disable(vdev);
err_power_down:
ivpu_hw_power_down(vdev);
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
if (!ivpu_fw_is_cold_boot(vdev)) {
ivpu_pm_prepare_cold_boot(vdev);


@ -210,6 +210,7 @@ extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
extern int amdgpu_mes_log_enable;
extern int amdgpu_mes_kiq;
extern int amdgpu_noretry;
extern int amdgpu_force_asic_type;


@ -4135,18 +4135,22 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->ip_blocks[i].status.hw = true;
}
}
} else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
!amdgpu_device_has_display_hardware(adev)) {
r = psp_gpu_reset(adev);
} else {
tmp = amdgpu_reset_method;
/* It should do a default reset when loading or reloading the driver,
* regardless of the module parameter reset_method.
*/
amdgpu_reset_method = AMD_RESET_METHOD_NONE;
r = amdgpu_asic_reset(adev);
amdgpu_reset_method = tmp;
if (r) {
dev_err(adev->dev, "asic reset on init failed\n");
goto failed;
}
tmp = amdgpu_reset_method;
/* It should do a default reset when loading or reloading the driver,
* regardless of the module parameter reset_method.
*/
amdgpu_reset_method = AMD_RESET_METHOD_NONE;
r = amdgpu_asic_reset(adev);
amdgpu_reset_method = tmp;
}
if (r) {
dev_err(adev->dev, "asic reset on init failed\n");
goto failed;
}
}


@ -1896,6 +1896,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
break;
default:


@ -195,6 +195,7 @@ int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = -1;
int amdgpu_discovery = -1;
int amdgpu_mes;
int amdgpu_mes_log_enable = 0;
int amdgpu_mes_kiq;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
@ -667,6 +668,15 @@ MODULE_PARM_DESC(mes,
"Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
module_param_named(mes, amdgpu_mes, int, 0444);
/**
* DOC: mes_log_enable (int)
* Enable Micro Engine Scheduler log. This is used to enable/disable MES internal log.
* (0 = disabled (default), 1 = enabled)
*/
MODULE_PARM_DESC(mes_log_enable,
"Enable Micro Engine Scheduler log (0 = disabled (default), 1 = enabled)");
module_param_named(mes_log_enable, amdgpu_mes_log_enable, int, 0444);
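/*
 * Usage sketch (not part of this patch): the parameter is registered with
 * 0444 permissions, so it is read-only through sysfs and only takes effect
 * at load time, e.g. "modprobe amdgpu mes_log_enable=1" when amdgpu is a
 * module, or "amdgpu.mes_log_enable=1" on the kernel command line when the
 * driver is built in.
 */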
/**
* DOC: mes_kiq (int)
* Enable Micro Engine Scheduler KIQ. This is a new engine pipe for kiq.

View File

@ -304,12 +304,15 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
dma_fence_set_error(finished, -ECANCELED);
if (finished->error < 0) {
DRM_INFO("Skip scheduling IBs!\n");
dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
ring->name);
} else {
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
&fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
dev_err(adev->dev,
"Error scheduling IBs (%d) in ring(%s)", r,
ring->name);
}
job->job_run_counter++;


@ -102,7 +102,10 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
int r;
r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
if (!amdgpu_mes_log_enable)
return 0;
r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
@ -1549,12 +1552,11 @@ static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
mem, PAGE_SIZE, false);
mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);
#endif
@ -1565,7 +1567,7 @@ void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
if (adev->enable_mes)
if (adev->enable_mes && amdgpu_mes_log_enable)
debugfs_create_file("amdgpu_mes_event_log", 0444, root,
adev, &amdgpu_debugfs_mes_event_log_fops);


@ -52,6 +52,7 @@ enum amdgpu_mes_priority_level {
#define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */
#define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */
#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000 /* Maximu log buffer size for MES */
struct amdgpu_mes_funcs;


@ -62,6 +62,11 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}
static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
{
return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
}
static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
uint32_t inst_idx, struct amdgpu_ring *ring)
{
@ -87,7 +92,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
case AMDGPU_RING_TYPE_VCN_ENC:
case AMDGPU_RING_TYPE_VCN_JPEG:
ip_blk = AMDGPU_XCP_VCN;
if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
if (aqua_vanjaram_xcp_vcn_shared(adev))
inst_mask = 1 << (inst_idx * 2);
break;
default:
@ -140,10 +145,12 @@ static int aqua_vanjaram_xcp_sched_list_update(
aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
/* VCN is shared by two partitions under CPX MODE */
/* VCN may be shared by two partitions under CPX MODE in certain
* configs.
*/
if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
aqua_vanjaram_xcp_vcn_shared(adev))
aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
}


@ -1635,7 +1635,7 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
}
active_rb_bitmap |= global_active_rb_bitmap;
active_rb_bitmap &= global_active_rb_bitmap;
adev->gfx.config.backend_enable_mask = active_rb_bitmap;
adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
@ -5465,6 +5465,7 @@ static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
/* Make sure that we can't skip the SET_Q_MODE packets when the VM
* changed in any way.
*/
ring->set_q_mode_offs = 0;
ring->set_q_mode_ptr = NULL;
}


@ -411,8 +411,11 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
mes_set_hw_res_pkt.oversubscription_timer = 50;
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
if (amdgpu_mes_log_enable) {
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr =
mes->event_log_gpu_addr;
}
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),


@ -1602,19 +1602,9 @@ static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL,
DRAM_ECC_INT_ENABLE, 0);
WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
break;
/* sdma ecc interrupt is enabled by default
* driver doesn't need to do anything to
* enable the interrupt */
case AMDGPU_IRQ_STATE_ENABLE:
default:
break;
}
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, DRAM_ECC_INT_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
return 0;
}


@ -457,10 +457,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
return false;
default:
return true;
}
@ -722,7 +720,10 @@ static int soc21_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_GFX_PG;
adev->external_rev_id = adev->rev_id + 0x1;
if (adev->rev_id == 0)
adev->external_rev_id = 0x1;
else
adev->external_rev_id = adev->rev_id + 0x10;
break;
case IP_VERSION(11, 5, 1):
adev->cg_flags =
@ -869,10 +870,35 @@ static int soc21_common_suspend(void *handle)
return soc21_common_hw_fini(adev);
}
static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
{
u32 sol_reg1, sol_reg2;
/* Will reset for the following suspend abort cases.
* 1) Only reset dGPU side.
* 2) S3 suspend got aborted and TOS is active.
*/
if (!(adev->flags & AMD_IS_APU) && adev->in_s3 &&
!adev->suspend_complete) {
sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
msleep(100);
sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
return (sol_reg1 != sol_reg2);
}
return false;
}
static int soc21_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (soc21_need_reset_on_resume(adev)) {
dev_info(adev->dev, "S3 suspend aborted, resetting...");
soc21_asic_reset(adev);
}
return soc21_common_hw_init(adev);
}


@ -225,6 +225,8 @@ static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch)
WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size);
ring->wptr = 0;
data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE);
data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK);
WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data);


@ -779,8 +779,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
* nodes, but not more than args->num_of_nodes as that is
* the amount of memory allocated by user
*/
pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
args->num_of_nodes), GFP_KERNEL);
pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
GFP_KERNEL);
if (!pa)
return -ENOMEM;
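	/*
	 * Background sketch (not part of this patch): kcalloc(n, size, flags)
	 * performs the n * size multiplication with an overflow check and
	 * returns NULL on overflow, while an open-coded multiply handed to
	 * kzalloc() can silently wrap and yield an undersized buffer, e.g.:
	 *
	 *   pa = kzalloc(n * sizeof(*pa), GFP_KERNEL);   // n * size may wrap
	 *   pa = kcalloc(n, sizeof(*pa), GFP_KERNEL);    // wrap returns NULL
	 *
	 * Here n (args->num_of_nodes) comes from userspace, hence the switch.
	 */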


@ -960,7 +960,6 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
struct kfd_node *node;
int i;
int count;
if (!kfd->init_complete)
return;
@ -968,12 +967,10 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
/* for runtime suspend, skip locking kfd */
if (!run_pm) {
mutex_lock(&kfd_processes_mutex);
count = ++kfd_locked;
mutex_unlock(&kfd_processes_mutex);
/* For first KFD device suspend all the KFD processes */
if (count == 1)
if (++kfd_locked == 1)
kfd_suspend_all_processes();
mutex_unlock(&kfd_processes_mutex);
}
for (i = 0; i < kfd->num_nodes; i++) {
@ -984,7 +981,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
int ret, count, i;
int ret, i;
if (!kfd->init_complete)
return 0;
@ -998,12 +995,10 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
/* for runtime resume, skip unlocking kfd */
if (!run_pm) {
mutex_lock(&kfd_processes_mutex);
count = --kfd_locked;
mutex_unlock(&kfd_processes_mutex);
WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
if (count == 0)
if (--kfd_locked == 0)
ret = kfd_resume_all_processes();
WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
mutex_unlock(&kfd_processes_mutex);
}
return ret;


@ -2001,6 +2001,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
while (halt_if_hws_hang)
schedule();
kfd_hws_hang(dqm);
return -ETIME;
}


@ -148,6 +148,9 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100
@ -3044,6 +3047,10 @@ static int dm_resume(void *handle)
/* Do mst topology probing after resuming cached state*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_root)
@ -4820,9 +4827,11 @@ static int dm_init_microcode(struct amdgpu_device *adev)
fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
break;
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
fw_name_dmub = FIRMWARE_DCN_35_DMUB;
break;
case IP_VERSION(3, 5, 1):
fw_name_dmub = FIRMWARE_DCN_351_DMUB;
break;
default:
/* ASIC doesn't support DMUB. */
return 0;
@ -5921,6 +5930,9 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
&aconnector->base.probed_modes :
&aconnector->base.modes;
if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
return NULL;
if (aconnector->freesync_vid_base.clock != 0)
return &aconnector->freesync_vid_base;
@ -6306,19 +6318,16 @@ create_stream_for_sink(struct drm_connector *connector,
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
stream->signal == SIGNAL_TYPE_EDP) {
//
// should decide stream support vsc sdp colorimetry capability
// before building vsc info packet
//
stream->use_vsc_sdp_for_colorimetry = false;
if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
stream->use_vsc_sdp_for_colorimetry =
aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
} else {
if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
stream->use_vsc_sdp_for_colorimetry = true;
}
stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
@ -8762,10 +8771,10 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
notify:
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
notify:
aconnector = to_amdgpu_dm_connector(connector);
mutex_lock(&adev->dm.audio_lock);


@ -76,10 +76,8 @@ static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder,
static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
return drm_add_modes_noedid(connector, dev->mode_config.max_width,
dev->mode_config.max_height);
/* Maximum resolution supported by DWB */
return drm_add_modes_noedid(connector, 3840, 2160);
}
static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector,


@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa(
return display_count;
}
static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
bool safe_to_lower, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
struct pipe_ctx *pipe = safe_to_lower
? &context->res_ctx.pipe_ctx[i]
: &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
dc_is_virtual_signal(pipe->stream->signal))) {
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
!pipe->stream->link_enc)) {
if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
dcn316_disable_otg_wa(clk_mgr_base, context, true);
dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
dcn316_disable_otg_wa(clk_mgr_base, context, false);
dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
}


@ -73,6 +73,12 @@
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
#define regCLK5_0_CLK5_spll_field_8 0x464b
#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0
#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd
#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
#define REG(reg_name) \
@ -411,6 +417,17 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
{
}
static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_context *ctx = clk_mgr->base.ctx;
uint32_t ssc_enable;
REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable);
return ssc_enable == 1;
}
static void init_clk_states(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
@ -428,7 +445,16 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
void dcn35_init_clocks(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
init_clk_states(clk_mgr);
// to adjust dp_dto reference clock if ssc is enable otherwise to apply dprefclk
if (dcn35_is_spll_ssc_enabled(clk_mgr))
clk_mgr->dp_dto_source_clock_in_khz =
dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
else
clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
}
static struct clk_bw_params dcn35_bw_params = {
.vram_type = Ddr4MemType,
@ -517,6 +543,28 @@ static DpmClocks_t_dcn35 dummy_clocks;
static struct dcn35_watermarks dummy_wms = { 0 };
static struct dcn35_ss_info_table ss_info_table = {
.ss_divider = 1000,
.ss_percentage = {0, 0, 375, 375, 375}
};
static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
{
struct dc_context *ctx = clk_mgr->base.ctx;
uint32_t clock_source;
REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
// If it's DFS mode, clock_source is 0.
if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
if (clk_mgr->dprefclk_ss_percentage != 0) {
clk_mgr->ss_on_dprefclk = true;
clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
}
}
}
static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
{
int i, num_valid_sets;
@ -1061,6 +1109,8 @@ void dcn35_clk_mgr_construct(
dce_clock_read_ss_info(&clk_mgr->base);
/*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/
dcn35_read_ss_info_from_lut(&clk_mgr->base);
clk_mgr->base.base.bw_params = &dcn35_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {


@ -436,6 +436,15 @@ bool dc_state_add_plane(
goto out;
}
if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
/* ODM combine could prevent us from supporting more planes
* we will reset ODM slice count back to 1 when all planes have
* been removed to maximize the amount of planes supported when
* new planes are added.
*/
resource_update_pipes_for_stream_with_slice_count(
state, dc->current_state, dc->res_pool, stream, 1);
otg_master_pipe = resource_get_otg_master_for_stream(
&state->res_ctx, stream);
if (otg_master_pipe)


@ -976,7 +976,10 @@ static bool dcn31_program_pix_clk(
struct bp_pixel_clock_parameters bp_pc_params = {0};
enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
// Apply ssed(spread spectrum) dpref clock for edp only.
if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0
&& pix_clk_params->signal_type == SIGNAL_TYPE_EDP
&& encoding == DP_8b_10b_ENCODING)
dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
// For these signal types Driver to program DP_DTO without calling VBIOS Command table
if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
@ -1093,9 +1096,6 @@ static bool get_pixel_clk_frequency_100hz(
unsigned int modulo_hz = 0;
unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;
if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) {
clock_hz = REG_READ(PHASE[inst]);


@ -267,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
OTG_V_TOTAL_MAX_SEL, 1,
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
// Setup manual flow control for EOF via TRIG_A
optc->funcs->setup_manual_trigger(optc);
}
}


@ -735,7 +735,7 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.state = SMU_BACO_STATE_NONE;
smu->smu_baco.platform_support = false;
smu->user_dpm_profile.fan_mode = -1;
@ -1966,10 +1966,25 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
return 0;
}
static int smu_reset_mp1_state(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
if ((!adev->in_runpm) && (!adev->in_suspend) &&
(!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==
IP_VERSION(13, 0, 10) &&
!amdgpu_device_has_display_hardware(adev))
ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
return ret;
}
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@ -1987,7 +2002,15 @@ static int smu_hw_fini(void *handle)
adev->pm.dpm_enabled = false;
return smu_smc_hw_cleanup(smu);
ret = smu_smc_hw_cleanup(smu);
if (ret)
return ret;
ret = smu_reset_mp1_state(smu);
if (ret)
return ret;
return 0;
}
static void smu_late_fini(void *handle)


@ -424,6 +424,7 @@ enum smu_reset_mode {
enum smu_baco_state {
SMU_BACO_STATE_ENTER = 0,
SMU_BACO_STATE_EXIT,
SMU_BACO_STATE_NONE,
};
struct smu_baco_context {


@ -144,6 +144,37 @@ typedef struct {
uint32_t MaxGfxClk;
} DpmClocks_t;
//Freq in MHz
//Voltage in milli volts with 2 fractional bits
typedef struct {
uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
uint32_t VClocks0[NUM_VCN_DPM_LEVELS];
uint32_t VClocks1[NUM_VCN_DPM_LEVELS];
uint32_t DClocks0[NUM_VCN_DPM_LEVELS];
uint32_t DClocks1[NUM_VCN_DPM_LEVELS];
uint32_t VPEClocks[NUM_VPE_DPM_LEVELS];
uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS];
uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS];
uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS];
uint8_t NumDcfClkLevelsEnabled;
uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
uint8_t NumSocClkLevelsEnabled;
uint8_t Vcn0ClkLevelsEnabled; //Applies to both Vclk0 and Dclk0
uint8_t Vcn1ClkLevelsEnabled; //Applies to both Vclk1 and Dclk1
uint8_t VpeClkLevelsEnabled;
uint8_t NumMemPstatesEnabled;
uint8_t NumFclkLevelsEnabled;
uint8_t spare;
uint32_t MinGfxClk;
uint32_t MaxGfxClk;
} DpmClocks_t_v14_0_1;
typedef struct {
uint16_t CoreFrequency[16]; //Target core frequency [MHz]
uint16_t CorePower[16]; //CAC calculated core power [mW]
@ -224,7 +255,7 @@ typedef enum {
#define TABLE_CUSTOM_DPM 2 // Called by Driver
#define TABLE_BIOS_GPIO_CONFIG 3 // Called by BIOS
#define TABLE_DPMCLOCKS 4 // Called by Driver and VBIOS
#define TABLE_SPARE0 5 // Unused
#define TABLE_MOMENTARY_PM 5 // Called by Tools
#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log
#define TABLE_SMU_METRICS 7 // Called by Driver and SMF/PMF
#define TABLE_COUNT 8


@ -42,7 +42,7 @@
#define FEATURE_EDC_BIT 7
#define FEATURE_PLL_POWER_DOWN_BIT 8
#define FEATURE_VDDOFF_BIT 9
#define FEATURE_VCN_DPM_BIT 10
#define FEATURE_VCN_DPM_BIT 10 /* this is for both VCN0 and VCN1 */
#define FEATURE_DS_MPM_BIT 11
#define FEATURE_FCLK_DPM_BIT 12
#define FEATURE_SOCCLK_DPM_BIT 13
@ -56,9 +56,9 @@
#define FEATURE_DS_GFXCLK_BIT 21
#define FEATURE_DS_SOCCLK_BIT 22
#define FEATURE_DS_LCLK_BIT 23
#define FEATURE_LOW_POWER_DCNCLKS_BIT 24 // for all DISP clks
#define FEATURE_LOW_POWER_DCNCLKS_BIT 24
#define FEATURE_DS_SHUBCLK_BIT 25
#define FEATURE_SPARE0_BIT 26 //SPARE
#define FEATURE_RESERVED0_BIT 26
#define FEATURE_ZSTATES_BIT 27
#define FEATURE_IOMMUL2_PG_BIT 28
#define FEATURE_DS_FCLK_BIT 29
@ -66,8 +66,8 @@
#define FEATURE_DS_MP1CLK_BIT 31
#define FEATURE_WHISPER_MODE_BIT 32
#define FEATURE_SMU_LOW_POWER_BIT 33
#define FEATURE_SMART_L3_RINSER_BIT 34
#define FEATURE_SPARE1_BIT 35 //SPARE
#define FEATURE_RESERVED1_BIT 34 /* v14_0_0 SMART_L3_RINSER; v14_0_1 RESERVED1 */
#define FEATURE_GFX_DEM_BIT 35 /* v14_0_0 SPARE; v14_0_1 GFX_DEM */
#define FEATURE_PSI_BIT 36
#define FEATURE_PROCHOT_BIT 37
#define FEATURE_CPUOFF_BIT 38
@ -77,11 +77,11 @@
#define FEATURE_PERF_LIMIT_BIT 42
#define FEATURE_CORE_DLDO_BIT 43
#define FEATURE_DVO_BIT 44
#define FEATURE_DS_VCN_BIT 45
#define FEATURE_DS_VCN_BIT 45 /* v14_0_1 this is for both VCN0 and VCN1 */
#define FEATURE_CPPC_BIT 46
#define FEATURE_CPPC_PREFERRED_CORES 47
#define FEATURE_DF_CSTATES_BIT 48
#define FEATURE_SPARE2_BIT 49 //SPARE
#define FEATURE_FAST_PSTATE_CLDO_BIT 49 /* v14_0_0 SPARE */
#define FEATURE_ATHUB_PG_BIT 50
#define FEATURE_VDDOFF_ECO_BIT 51
#define FEATURE_ZSTATES_ECO_BIT 52
@ -93,8 +93,8 @@
#define FEATURE_DS_IPUCLK_BIT 58
#define FEATURE_DS_VPECLK_BIT 59
#define FEATURE_VPE_DPM_BIT 60
#define FEATURE_SPARE_61 61
#define FEATURE_FP_DIDT 62
#define FEATURE_SMART_L3_RINSER_BIT 61 /* v14_0_0 SPARE*/
#define FEATURE_PCC_BIT 62 /* v14_0_0 FP_DIDT v14_0_1 PCC_BIT */
#define NUM_FEATURES 63
// Firmware Header/Footer
@ -151,6 +151,43 @@ typedef struct {
// MP1_EXT_SCRATCH7 = RTOS Current Job
} FwStatus_t;
typedef struct {
// MP1_EXT_SCRATCH0
uint32_t DpmHandlerID : 8;
uint32_t ActivityMonitorID : 8;
uint32_t DpmTimerID : 8;
uint32_t DpmHubID : 4;
uint32_t DpmHubTask : 4;
// MP1_EXT_SCRATCH1
uint32_t CclkSyncStatus : 8;
uint32_t ZstateStatus : 4;
uint32_t Cpu1VddOff : 4;
uint32_t DstateFun : 4;
uint32_t DstateDev : 4;
uint32_t GfxOffStatus : 2;
uint32_t Cpu0Off : 2;
uint32_t Cpu1Off : 2;
uint32_t Cpu0VddOff : 2;
// MP1_EXT_SCRATCH2
uint32_t P2JobHandler :32;
// MP1_EXT_SCRATCH3
uint32_t PostCode :32;
// MP1_EXT_SCRATCH4
uint32_t MsgPortBusy :15;
uint32_t RsmuPmiP1Pending : 1;
uint32_t RsmuPmiP2PendingCnt : 8;
uint32_t DfCstateExitPending : 1;
uint32_t Pc6EntryPending : 1;
uint32_t Pc6ExitPending : 1;
uint32_t WarmResetPending : 1;
uint32_t Mp0ClkPending : 1;
uint32_t InWhisperMode : 1;
uint32_t spare2 : 2;
// MP1_EXT_SCRATCH5
uint32_t IdleMask :32;
// MP1_EXT_SCRATCH6 = RTOS threads' status
// MP1_EXT_SCRATCH7 = RTOS Current Job
} FwStatus_t_v14_0_1;
#pragma pack(pop)


@ -72,23 +72,19 @@
#define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK
#define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK
#define PPSMC_MSG_SetSoftMinVcn0 0x15 ///< Set soft min for VCN0 clocks (VCLK0 and DCLK0)
#define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU
#define PPSMC_MSG_spare_0x17 0x17
#define PPSMC_MSG_spare_0x18 0x18
#define PPSMC_MSG_spare_0x17 0x17 ///< Get GFX clock frequency
#define PPSMC_MSG_spare_0x18 0x18 ///< Get FCLK frequency
#define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry
#define PPSMC_MSG_DisallowGfxOff 0x1A ///< Inform PMFW of disallowing GFXOFF entry
#define PPSMC_MSG_SetSoftMaxGfxClk 0x1B ///< Set soft max for GFX CLK
#define PPSMC_MSG_SetHardMinGfxClk 0x1C ///< Set hard min for GFX CLK
#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D ///< Set soft max for SOC CLK
#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x1E ///< Set soft max for FCLK
#define PPSMC_MSG_SetSoftMaxVcn0 0x1F ///< Set soft max for VCN0 clocks (VCLK0 and DCLK0)
#define PPSMC_MSG_spare_0x20 0x20
#define PPSMC_MSG_spare_0x20 0x20 ///< Set power limit percentage
#define PPSMC_MSG_PowerDownJpeg0 0x21 ///< Power down Jpeg of VCN0
#define PPSMC_MSG_PowerUpJpeg0 0x22 ///< Power up Jpeg of VCN0; VCN0 is power gated by default
#define PPSMC_MSG_SetHardMinFclkByFreq 0x23 ///< Set hard min for FCLK
#define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24 ///< Set soft min for SOC CLK
#define PPSMC_MSG_AllowZstates 0x25 ///< Inform PMFM of allowing Zstate entry, i.e. no Miracast activity
@ -99,8 +95,8 @@
#define PPSMC_MSG_PowerUpIspByTile 0x2A ///< This message is used to power up ISP tiles and enable the ISP DPM
#define PPSMC_MSG_SetHardMinIspiclkByFreq 0x2B ///< Set HardMin by frequency for ISPICLK
#define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK
#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN.UMSCH (aka VSCH) scheduler
#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN.UMSCH (aka VSCH) scheduler
#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN0.UMSCH (aka VSCH) scheduler
#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN0.UMSCH (aka VSCH) scheduler
#define PPSMC_Message_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis
#define PPSMC_Message_IspStutterOff_MmhubPgEn 0x30 ///< ISP StufferOff mmHub PgEn
#define PPSMC_MSG_PowerUpVpe 0x31 ///< Power up VPE
@ -110,7 +106,9 @@
#define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA
#define PPSMC_MSG_SetSoftMaxVpe 0x36 ///<
#define PPSMC_MSG_SetSoftMinVpe 0x37 ///<
#define PPSMC_Message_Count 0x38 ///< Total number of PPSMC messages
#define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache
#define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache
#define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages
/** @}*/
/**


@ -27,6 +27,7 @@
#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1
#define FEATURE_MASK(feature) (1ULL << feature)


@ -2751,7 +2751,13 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
ret = smu_cmn_set_mp1_state(smu, mp1_state);
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_PrepareMp1ForUnload,
0x55, NULL);
if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
ret = smu_v13_0_disable_pmfw_state(smu);
break;
default:
/* Ignore others */


@ -226,8 +226,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
if (!en && !adev->in_s0ix)
if (!en && !adev->in_s0ix) {
/* Adds a GFX reset as workaround just before sending the
* MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
* an invalid state.
*/
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
SMU_RESET_MODE_2, NULL);
if (ret)
return ret;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
}
return ret;
}


@ -234,7 +234,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
break;
case IP_VERSION(14, 0, 1):
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
break;
default:


@ -161,7 +161,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@ -171,7 +171,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
smu_table->clocks_table = kzalloc(max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)), GFP_KERNEL);
if (!smu_table->clocks_table)
goto err1_out;
@ -593,6 +593,60 @@ static int smu_v14_0_0_mode2_reset(struct smu_context *smu)
return ret;
}
static int smu_v14_0_1_get_dpm_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t dpm_level,
uint32_t *freq)
{
DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
switch (clk_type) {
case SMU_SOCCLK:
if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
return -EINVAL;
*freq = clk_table->SocClocks[dpm_level];
break;
case SMU_VCLK:
if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
return -EINVAL;
*freq = clk_table->VClocks0[dpm_level];
break;
case SMU_DCLK:
if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
return -EINVAL;
*freq = clk_table->DClocks0[dpm_level];
break;
case SMU_VCLK1:
if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled)
return -EINVAL;
*freq = clk_table->VClocks1[dpm_level];
break;
case SMU_DCLK1:
if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled)
return -EINVAL;
*freq = clk_table->DClocks1[dpm_level];
break;
case SMU_UCLK:
case SMU_MCLK:
if (dpm_level >= clk_table->NumMemPstatesEnabled)
return -EINVAL;
*freq = clk_table->MemPstateTable[dpm_level].MemClk;
break;
case SMU_FCLK:
if (dpm_level >= clk_table->NumFclkLevelsEnabled)
return -EINVAL;
*freq = clk_table->FclkClocks_Freq[dpm_level];
break;
default:
return -EINVAL;
}
return 0;
}
static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t dpm_level,
@ -637,6 +691,19 @@ static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu,
return 0;
}
static int smu_v14_0_common_get_dpm_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t dpm_level,
uint32_t *freq)
{
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
smu_v14_0_1_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
return 0;
}
static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
enum smu_clk_type clk_type)
{
@ -657,6 +724,8 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
break;
case SMU_VCLK:
case SMU_DCLK:
case SMU_VCLK1:
case SMU_DCLK1:
feature_id = SMU_FEATURE_VCN_DPM_BIT;
break;
default:
@ -666,6 +735,126 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
return smu_cmn_feature_is_enabled(smu, feature_id);
}
static int smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min,
uint32_t *max)
{
DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
uint32_t clock_limit;
uint32_t max_dpm_level, min_dpm_level;
int ret = 0;
if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) {
switch (clk_type) {
case SMU_MCLK:
case SMU_UCLK:
clock_limit = smu->smu_table.boot_values.uclk;
break;
case SMU_FCLK:
clock_limit = smu->smu_table.boot_values.fclk;
break;
case SMU_GFXCLK:
case SMU_SCLK:
clock_limit = smu->smu_table.boot_values.gfxclk;
break;
case SMU_SOCCLK:
clock_limit = smu->smu_table.boot_values.socclk;
break;
case SMU_VCLK:
case SMU_VCLK1:
clock_limit = smu->smu_table.boot_values.vclk;
break;
case SMU_DCLK:
case SMU_DCLK1:
clock_limit = smu->smu_table.boot_values.dclk;
break;
default:
clock_limit = 0;
break;
}
/* clock in Mhz unit */
if (min)
*min = clock_limit / 100;
if (max)
*max = clock_limit / 100;
return 0;
}
if (max) {
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
*max = clk_table->MaxGfxClk;
break;
case SMU_MCLK:
case SMU_UCLK:
case SMU_FCLK:
max_dpm_level = 0;
break;
case SMU_SOCCLK:
max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
break;
case SMU_VCLK:
case SMU_DCLK:
max_dpm_level = clk_table->Vcn0ClkLevelsEnabled - 1;
break;
case SMU_VCLK1:
case SMU_DCLK1:
max_dpm_level = clk_table->Vcn1ClkLevelsEnabled - 1;
break;
default:
ret = -EINVAL;
goto failed;
}
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
if (ret)
goto failed;
}
}
if (min) {
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
*min = clk_table->MinGfxClk;
break;
case SMU_MCLK:
case SMU_UCLK:
min_dpm_level = clk_table->NumMemPstatesEnabled - 1;
break;
case SMU_FCLK:
min_dpm_level = clk_table->NumFclkLevelsEnabled - 1;
break;
case SMU_SOCCLK:
min_dpm_level = 0;
break;
case SMU_VCLK:
case SMU_DCLK:
case SMU_VCLK1:
case SMU_DCLK1:
min_dpm_level = 0;
break;
default:
ret = -EINVAL;
goto failed;
}
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
if (ret)
goto failed;
}
}
failed:
return ret;
}
static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min,
@ -736,7 +925,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
}
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
if (ret)
goto failed;
}
@ -768,7 +957,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
}
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
if (ret)
goto failed;
}
@ -778,6 +967,19 @@ failed:
return ret;
}
static int smu_v14_0_common_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min,
uint32_t *max)
{
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
smu_v14_0_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
smu_v14_0_1_get_dpm_ultimate_freq(smu, clk_type, min, max);
return 0;
}
static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value)
@ -811,6 +1013,37 @@ static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value);
}
static int smu_v14_0_1_get_dpm_level_count(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *count)
{
DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
switch (clk_type) {
case SMU_SOCCLK:
*count = clk_table->NumSocClkLevelsEnabled;
break;
case SMU_VCLK:
case SMU_DCLK:
*count = clk_table->Vcn0ClkLevelsEnabled;
break;
case SMU_VCLK1:
case SMU_DCLK1:
*count = clk_table->Vcn1ClkLevelsEnabled;
break;
case SMU_MCLK:
*count = clk_table->NumMemPstatesEnabled;
break;
case SMU_FCLK:
*count = clk_table->NumFclkLevelsEnabled;
break;
default:
break;
}
return 0;
}
static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *count)
@ -840,6 +1073,18 @@ static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
return 0;
}
static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *count)
{
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
smu_v14_0_0_get_dpm_level_count(smu, clk_type, count);
else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
smu_v14_0_1_get_dpm_level_count(smu, clk_type, count);
return 0;
}
static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
@ -866,18 +1111,20 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
case SMU_SOCCLK:
case SMU_VCLK:
case SMU_DCLK:
case SMU_VCLK1:
case SMU_DCLK1:
case SMU_MCLK:
case SMU_FCLK:
ret = smu_v14_0_0_get_current_clk_freq(smu, clk_type, &cur_value);
if (ret)
break;
ret = smu_v14_0_0_get_dpm_level_count(smu, clk_type, &count);
ret = smu_v14_0_common_get_dpm_level_count(smu, clk_type, &count);
if (ret)
break;
for (i = 0; i < count; i++) {
ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value);
if (ret)
break;
@ -940,8 +1187,13 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
break;
case SMU_VCLK:
case SMU_DCLK:
msg_set_min = SMU_MSG_SetHardMinVcn;
msg_set_max = SMU_MSG_SetSoftMaxVcn;
msg_set_min = SMU_MSG_SetHardMinVcn0;
msg_set_max = SMU_MSG_SetSoftMaxVcn0;
break;
case SMU_VCLK1:
case SMU_DCLK1:
msg_set_min = SMU_MSG_SetHardMinVcn1;
msg_set_max = SMU_MSG_SetSoftMaxVcn1;
break;
default:
return -EINVAL;
@ -971,11 +1223,11 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
case SMU_FCLK:
case SMU_VCLK:
case SMU_DCLK:
ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
if (ret)
break;
ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
if (ret)
break;
@ -1000,25 +1252,25 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
sclk_min = sclk_max;
fclk_min = fclk_max;
socclk_min = socclk_max;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
sclk_max = sclk_min;
fclk_max = fclk_min;
socclk_max = socclk_min;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
@ -1067,6 +1319,18 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
return ret;
}
static int smu_v14_0_1_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
smu->gfx_actual_hard_min_freq = 0;
smu->gfx_actual_soft_max_freq = 0;
return 0;
}
static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
@ -1079,6 +1343,16 @@ static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
return 0;
}
static int smu_v14_0_common_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
smu_v14_0_0_set_fine_grain_gfx_freq_parameters(smu);
else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
smu_v14_0_1_set_fine_grain_gfx_freq_parameters(smu);
return 0;
}
static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu,
bool enable)
{
@ -1095,6 +1369,25 @@ static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
0, NULL);
}
static int smu_14_0_1_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
{
DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
uint8_t idx;
/* Only the Clock information of SOC and VPE is copied to provide VPE DPM settings for use. */
for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) {
clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? clk_table->SocClocks[idx]:0;
clock_table->SocClocks[idx].Vol = 0;
}
for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) {
clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? clk_table->VPEClocks[idx]:0;
clock_table->VPEClocks[idx].Vol = 0;
}
return 0;
}
static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
@ -1114,6 +1407,16 @@ static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *
return 0;
}
static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
{
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
smu_14_0_0_get_dpm_table(smu, clock_table);
else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
smu_14_0_1_get_dpm_table(smu, clock_table);
return 0;
}
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
.check_fw_version = smu_v14_0_check_fw_version,
@ -1135,16 +1438,16 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.set_driver_table_location = smu_v14_0_set_driver_table_location,
.gfx_off_control = smu_v14_0_gfx_off_control,
.mode2_reset = smu_v14_0_0_mode2_reset,
.get_dpm_ultimate_freq = smu_v14_0_0_get_dpm_ultimate_freq,
.get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq,
.od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
.print_clk_levels = smu_v14_0_0_print_clk_levels,
.force_clk_levels = smu_v14_0_0_force_clk_levels,
.set_performance_level = smu_v14_0_0_set_performance_level,
.set_fine_grain_gfx_freq_parameters = smu_v14_0_0_set_fine_grain_gfx_freq_parameters,
.set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters,
.set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
.get_dpm_clock_table = smu_14_0_0_get_dpm_table,
.get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
};
static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)


@ -180,6 +180,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
struct ast_device *ast = to_ast_device(dev);
u8 video_on_off = on;
u32 i = 0;
// Video On/Off
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
@ -192,6 +193,8 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
// wait 1 ms
mdelay(1);
if (++i > 200)
break;
}
}
}


@ -777,6 +777,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
/* points to modes protected by mode_config.mutex */
struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
@ -845,7 +846,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
drm_client_pick_crtcs(client, connectors, connector_count,
crtcs, modes, 0, width, height);
}
mutex_unlock(&dev->mode_config.mutex);
drm_client_modeset_release(client);
@ -875,6 +875,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
modeset->y = offset->y;
}
}
mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&client->modeset_mutex);
out:


@ -2534,7 +2534,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
enum pipe pipe = new_cdclk_state->pipe;
struct intel_cdclk_config cdclk_config;
enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@ -2543,12 +2544,25 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_pre_notify(state);
if (pipe == INVALID_PIPE ||
old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
if (new_cdclk_state->disable_pipes) {
cdclk_config = new_cdclk_state->actual;
pipe = INVALID_PIPE;
} else {
if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) {
cdclk_config = new_cdclk_state->actual;
pipe = new_cdclk_state->pipe;
} else {
cdclk_config = old_cdclk_state->actual;
pipe = INVALID_PIPE;
}
intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level,
old_cdclk_state->actual.voltage_level);
}
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
intel_set_cdclk(i915, &cdclk_config, pipe);
}
/**
@ -2566,7 +2580,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
enum pipe pipe = new_cdclk_state->pipe;
enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@ -2575,12 +2589,15 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_post_notify(state);
if (pipe != INVALID_PIPE &&
old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
if (!new_cdclk_state->disable_pipes &&
new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk)
pipe = new_cdclk_state->pipe;
else
pipe = INVALID_PIPE;
intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
}
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
}
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
@ -3058,6 +3075,7 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa
return NULL;
cdclk_state->pipe = INVALID_PIPE;
cdclk_state->disable_pipes = false;
return &cdclk_state->base;
}
@ -3236,6 +3254,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
if (ret)
return ret;
new_cdclk_state->disable_pipes = true;
drm_dbg_kms(&dev_priv->drm,
"Modeset required for cdclk change\n");
}


@ -51,6 +51,9 @@ struct intel_cdclk_state {
/* bitmask of active pipes */
u8 active_pipes;
/* update cdclk with pipes disabled */
bool disable_pipes;
};
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);


@ -4256,7 +4256,12 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1,
static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
const struct intel_crtc_state *crtc_state2)
{
/*
* FIXME the modeset sequence is currently wrong and
* can't deal with bigjoiner + port sync at the same time.
*/
return crtc_state1->hw.active && crtc_state2->hw.active &&
!crtc_state1->bigjoiner_pipes && !crtc_state2->bigjoiner_pipes &&
crtc_state1->output_types == crtc_state2->output_types &&
crtc_state1->output_format == crtc_state2->output_format &&
crtc_state1->lane_count == crtc_state2->lane_count &&


@ -2725,7 +2725,11 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
int pixel_clock;
if (has_seamless_m_n(connector))
/*
* FIXME all joined pipes share the same transcoder.
* Need to account for that when updating M/N live.
*/
if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
pipe_config->update_m_n = true;
if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {


@ -691,12 +691,15 @@ int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector,
u8 bcaps;
int ret;
*hdcp_capable = false;
*hdcp2_capable = false;
if (!intel_encoder_is_mst(connector->encoder))
return -EINVAL;
ret = _intel_dp_hdcp2_get_capability(aux, hdcp2_capable);
if (ret)
return ret;
drm_dbg_kms(&i915->drm,
"HDCP2 DPCD capability read failed err: %d\n", ret);
ret = intel_dp_hdcp_read_bcaps(aux, i915, &bcaps);
if (ret)


@ -1422,6 +1422,17 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
/*
* FIXME figure out what is wrong with PSR+bigjoiner and
* fix it. Presumably something related to the fact that
* PSR is a transcoder level feature.
*/
if (crtc_state->bigjoiner_pipes) {
drm_dbg_kms(&dev_priv->drm,
"PSR disabled due to bigjoiner\n");
return;
}
if (CAN_PANEL_REPLAY(intel_dp))
crtc_state->has_panel_replay = true;
else


@ -117,6 +117,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
const struct drm_display_info *info = &connector->base.display_info;
int vmin, vmax;
/*
* FIXME all joined pipes share the same transcoder.
* Need to account for that during VRR toggle/push/etc.
*/
if (crtc_state->bigjoiner_pipes)
return;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return;


@ -1403,14 +1403,17 @@ static void guc_cancel_busyness_worker(struct intel_guc *guc)
* Trying to pass a 'need_sync' or 'in_reset' flag all the way down through
* every possible call stack is unfeasible. It would be too intrusive to many
* areas that really don't care about the GuC backend. However, there is the
* 'reset_in_progress' flag available, so just use that.
* I915_RESET_BACKOFF flag and the gt->reset.mutex can be tested for is_locked.
* So just use those. Note that testing both is required due to the hideously
* complex nature of the i915 driver's reset code paths.
*
* And note that in the case of a reset occurring during driver unload
* (wedge_on_fini), skipping the cancel in _prepare (when the reset flag is set
* is fine because there is another cancel in _finish (when the reset flag is
* not).
* (wedged_on_fini), skipping the cancel in reset_prepare/reset_fini (when the
* reset flag/mutex are set) is fine because there is another explicit cancel in
* intel_guc_submission_fini (when the reset flag/mutex are not).
*/
if (guc_to_gt(guc)->uc.reset_in_progress)
if (mutex_is_locked(&guc_to_gt(guc)->reset.mutex) ||
test_bit(I915_RESET_BACKOFF, &guc_to_gt(guc)->reset.flags))
cancel_delayed_work(&guc->timestamp.work);
else
cancel_delayed_work_sync(&guc->timestamp.work);
@ -1424,8 +1427,6 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
unsigned long flags;
ktime_t unused;
guc_cancel_busyness_worker(guc);
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
@ -2004,13 +2005,6 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
void intel_guc_submission_reset_finish(struct intel_guc *guc)
{
/*
* Ensure the busyness worker gets cancelled even on a fatal wedge.
* Note that reset_prepare is not allowed to because it confuses lockdep.
*/
if (guc_submission_initialized(guc))
guc_cancel_busyness_worker(guc);
/* Reset called during driver load or during wedge? */
if (unlikely(!guc_submission_initialized(guc) ||
!intel_guc_is_fw_running(guc) ||
@ -2136,6 +2130,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
if (!guc->submission_initialized)
return;
guc_fini_engine_stats(guc);
guc_flush_destroyed_contexts(guc);
guc_lrc_desc_pool_destroy_v69(guc);
i915_sched_engine_put(guc->sched_engine);


@ -637,6 +637,10 @@ void intel_uc_reset_finish(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
/*
* NB: The wedge code path results in prepare -> prepare -> finish -> finish.
* So this function is sometimes called with the in-progress flag not set.
*/
uc->reset_in_progress = false;
/* Firmware expected to be running when this function is called */


@ -1377,6 +1377,10 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
if (adreno_is_a618(gpu))
gpu->ubwc_config.highest_bank_bit = 14;
if (adreno_is_a619(gpu))
/* TODO: Should be 14 but causes corruption at e.g. 1920x1200 on DP */
gpu->ubwc_config.highest_bank_bit = 13;
if (adreno_is_a619_holi(gpu))
gpu->ubwc_config.highest_bank_bit = 13;


@ -852,7 +852,7 @@ static void a6xx_get_shader_block(struct msm_gpu *gpu,
(block->type << 8) | i);
in += CRASHDUMP_READ(in, REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE,
block->size, dumper->iova + A6XX_CD_DATA_OFFSET);
block->size, out);
out += block->size * sizeof(u32);
}


@ -324,6 +324,7 @@ static const struct dpu_wb_cfg x1e80100_wb[] = {
},
};
/* TODO: INTF 3, 8 and 7 are used for MST, marked as INTF_NONE for now */
static const struct dpu_intf_cfg x1e80100_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@ -358,8 +359,8 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
.features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
@ -368,7 +369,7 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
.base = 0x38000, .len = 0x280,
.features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_2,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21),
@ -381,6 +382,33 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23),
}, {
.name = "intf_6", .id = INTF_6,
.base = 0x3A000, .len = 0x280,
.features = INTF_SC7280_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_2,
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
}, {
.name = "intf_7", .id = INTF_7,
.base = 0x3b000, .len = 0x280,
.features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_2, /* pair with intf_6 for DP MST */
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19),
}, {
.name = "intf_8", .id = INTF_8,
.base = 0x3c000, .len = 0x280,
.features = INTF_SC7280_MASK,
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
},
};


@ -459,15 +459,15 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
&perf->core_clk_rate);
debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
debugfs_create_u32("threshold_low", 0600, entry,
debugfs_create_u32("threshold_low", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_low);
debugfs_create_u32("threshold_high", 0600, entry,
debugfs_create_u32("threshold_high", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_high);
debugfs_create_u32("min_core_ib", 0600, entry,
debugfs_create_u32("min_core_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_core_ib);
debugfs_create_u32("min_llcc_ib", 0600, entry,
debugfs_create_u32("min_llcc_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_llcc_ib);
debugfs_create_u32("min_dram_ib", 0600, entry,
debugfs_create_u32("min_dram_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_dram_ib);
debugfs_create_file("perf_mode", 0600, entry,
(u32 *)perf, &dpu_core_perf_mode_fops);


@ -525,14 +525,14 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
int ret;
if (!irq_cb) {
DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
DPU_ERROR("IRQ=[%d, %d] NULL callback\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return -EINVAL;
}
if (!dpu_core_irq_is_valid(irq_idx)) {
DPU_ERROR("invalid IRQ=[%d, %d]\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
return -EINVAL;
}


@ -484,7 +484,7 @@ static void dp_display_handle_video_request(struct dp_display_private *dp)
}
}
static int dp_display_handle_port_ststus_changed(struct dp_display_private *dp)
static int dp_display_handle_port_status_changed(struct dp_display_private *dp)
{
int rc = 0;
@ -541,7 +541,7 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
drm_dbg_dp(dp->drm_dev, "hpd_state=%d sink_request=%d\n",
dp->hpd_state, sink_request);
if (sink_request & DS_PORT_STATUS_CHANGED)
rc = dp_display_handle_port_ststus_changed(dp);
rc = dp_display_handle_port_status_changed(dp);
else
rc = dp_display_handle_irq_hpd(dp);
}
@ -588,6 +588,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
ret = dp_display_usbpd_configure_cb(&pdev->dev);
if (ret) { /* link train failed */
dp->hpd_state = ST_DISCONNECTED;
pm_runtime_put_sync(&pdev->dev);
} else {
dp->hpd_state = ST_MAINLINK_READY;
}
@ -645,6 +646,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp_display_host_phy_exit(dp);
dp->hpd_state = ST_DISCONNECTED;
dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
pm_runtime_put_sync(&pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
}


@ -89,7 +89,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
for (i = 0; i < n; i++) {
ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &msm_fb->iova[i]);
drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)",
drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)\n",
fb->base.id, i, msm_fb->iova[i], ret);
if (ret)
return ret;
@ -176,7 +176,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct msm_format *format;
int ret, i, n;
drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)",
drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)\n",
mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
@ -232,7 +232,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
refcount_set(&msm_fb->dirtyfb, 1);
drm_dbg_state(dev, "create: FB ID: %d (%p)", fb->base.id, fb);
drm_dbg_state(dev, "create: FB ID: %d (%p)\n", fb->base.id, fb);
return fb;


@ -149,7 +149,7 @@ int msm_crtc_enable_vblank(struct drm_crtc *crtc)
struct msm_kms *kms = priv->kms;
if (!kms)
return -ENXIO;
drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
return vblank_ctrl_queue_work(priv, crtc, true);
}
@ -160,7 +160,7 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc)
struct msm_kms *kms = priv->kms;
if (!kms)
return;
drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
vblank_ctrl_queue_work(priv, crtc, false);
}


@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
return ERR_PTR(-EINVAL);
}
static void of_fini(void *p)
{
kfree(p);
}
const struct nvbios_source
nvbios_of = {
.name = "OpenFirmware",
.init = of_init,
.fini = (void(*)(void *))kfree,
.fini = of_fini,
.read = of_read,
.size = of_size,
.rw = false,


@ -1112,7 +1112,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
rpc->numEntries = NV_GSP_REG_NUM_ENTRIES;
str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]);
strings = (char *)&rpc->entries[NV_GSP_REG_NUM_ENTRIES];
strings = (char *)rpc + str_offset;
for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) {
int name_len = strlen(r535_registry_entries[i].name) + 1;


@ -502,11 +502,18 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
mapping_set_unevictable(mapping);
for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
/* Can happen if the last fault only partially filled this
* section of the pages array before failing. In that case
* we skip already filled pages.
*/
if (pages[i])
continue;
pages[i] = shmem_read_mapping_page(mapping, i);
if (IS_ERR(pages[i])) {
ret = PTR_ERR(pages[i]);
pages[i] = NULL;
goto err_pages;
goto err_unlock;
}
}
@ -514,7 +521,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
if (ret)
goto err_pages;
goto err_unlock;
ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
if (ret)
@ -537,8 +544,6 @@ out:
err_map:
sg_free_table(sgt);
err_pages:
drm_gem_shmem_put_pages(&bo->base);
err_unlock:
dma_resv_unlock(obj->resv);
err_bo:


@ -58,16 +58,56 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr,
signed long timeout)
{
struct qxl_device *qdev;
struct qxl_release *release;
int count = 0, sc = 0;
bool have_drawable_releases;
unsigned long cur, end = jiffies + timeout;
qdev = container_of(fence->lock, struct qxl_device, release_lock);
release = container_of(fence, struct qxl_release, base);
have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
if (!wait_event_timeout(qdev->release_event,
(dma_fence_is_signaled(fence) ||
(qxl_io_notify_oom(qdev), 0)),
timeout))
return 0;
retry:
sc++;
if (dma_fence_is_signaled(fence))
goto signaled;
qxl_io_notify_oom(qdev);
for (count = 0; count < 11; count++) {
if (!qxl_queue_garbage_collect(qdev, true))
break;
if (dma_fence_is_signaled(fence))
goto signaled;
}
if (dma_fence_is_signaled(fence))
goto signaled;
if (have_drawable_releases || sc < 4) {
if (sc > 2)
/* back off */
usleep_range(500, 1000);
if (time_after(jiffies, end))
return 0;
if (have_drawable_releases && sc > 300) {
DMA_FENCE_WARN(fence,
"failed to wait on release %llu after spincount %d\n",
fence->context & ~0xf0000000, sc);
goto signaled;
}
goto retry;
}
/*
* yeah, original sync_obj_wait gave up after 3 spins when
* have_drawable_releases is not set.
*/
signaled:
cur = jiffies;
if (time_after(cur, end))
return 0;


@ -666,11 +666,12 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_map_populate] = "Caching DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
/* TTM currently doesn't fully support SEV encryption. */
if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
return -EINVAL;
if (vmw_force_coherent)
/*
* When running with SEV we always want dma mappings, because
* otherwise ttm tt pool pages will bounce through swiotlb running
* out of available space.
*/
if (vmw_force_coherent || cc_platform_has(CC_ATTR_MEM_ENCRYPT))
dev_priv->map_mode = vmw_dma_alloc_coherent;
else if (vmw_restrict_iommu)
dev_priv->map_mode = vmw_dma_map_bind;


@ -108,11 +108,6 @@ int xe_display_create(struct xe_device *xe)
xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
drmm_mutex_init(&xe->drm, &xe->sb_lock);
drmm_mutex_init(&xe->drm, &xe->display.backlight.lock);
drmm_mutex_init(&xe->drm, &xe->display.audio.mutex);
drmm_mutex_init(&xe->drm, &xe->display.wm.wm_mutex);
drmm_mutex_init(&xe->drm, &xe->display.pps.mutex);
drmm_mutex_init(&xe->drm, &xe->display.hdcp.hdcp_mutex);
xe->enabled_irq_mask = ~0;
err = drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);


@ -125,7 +125,7 @@
#define RING_EXECLIST_STATUS_LO(base) XE_REG((base) + 0x234)
#define RING_EXECLIST_STATUS_HI(base) XE_REG((base) + 0x234 + 4)
#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244)
#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0)


@ -290,7 +290,7 @@ xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *a
* As y can be < 2, we compute tau4 = (4 | x) << y
* and then add 2 when doing the final right shift to account for units
*/
tau4 = ((1 << x_w) | x) << y;
tau4 = (u64)((1 << x_w) | x) << y;
/* val in hwmon interface units (millisec) */
out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
@ -330,7 +330,7 @@ xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *
r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
tau4 = ((1 << x_w) | x) << y;
tau4 = (u64)((1 << x_w) | x) << y;
max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
if (val > max_win)
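
Note on the hwmon hunk above: the added (u64) cast widens ((1 << x_w) | x)
before the shift by y; without it the expression is evaluated in 32-bit
arithmetic and the high bits are lost before the result reaches the 64-bit
tau4. The casts in the xe_migrate hunk further down apply the same widening
to the PDE/offset math. A minimal standalone sketch of the effect, with
made-up values that are illustrative only (not taken from the driver):

	/* demo.c - 32-bit truncation vs. widening before the shift */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t x = 2, x_w = 2, y = 30;	/* illustrative values only */

		/* shift happens in 32 bits, high bits are already gone */
		uint64_t truncated = ((1u << x_w) | x) << y;
		/* widen first, as the fix does */
		uint64_t widened = (uint64_t)((1u << x_w) | x) << y;

		printf("truncated=%llu widened=%llu\n",
		       (unsigned long long)truncated,
		       (unsigned long long)widened);
		return 0;
	}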


@ -525,9 +525,8 @@ static const u8 *reg_offsets(struct xe_device *xe, enum xe_engine_class class)
static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
{
regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH) |
_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
/* TODO: Timestamp */
}
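
A note on the two RING_CONTEXT_CONTROL changes above: the register is a
"masked" register, meaning the upper 16 bits of a write select which of the
lower 16 bits actually get updated, so a bit and its mask bit must be set
together. Assuming the usual i915-style macro definitions, the old and new
expressions encode the same value; the new one simply states the intent with
a single _MASKED_BIT_ENABLE, and tagging the register definition with
XE_REG_OPTION_MASKED tells the register helpers about the convention. A rough
standalone sketch of that convention (macro bodies here are illustrative, not
copied from the driver):

	/* masked_reg.c - masked-register write convention, illustrative only */
	#include <stdint.h>
	#include <stdio.h>

	#define MASKED_BIT_ENABLE(b)	(((b) << 16) | (b))	/* unmask bit(s) and set them */
	#define MASKED_BIT_DISABLE(b)	((b) << 16)		/* unmask bit(s) and clear them */

	#define INHIBIT_SYN_CTX_SWITCH		(1u << 3)
	#define ENGINE_CTX_RESTORE_INHIBIT	(1u << 0)

	int main(void)
	{
		/* one masked write enabling both bits, as the new code does */
		uint32_t v = MASKED_BIT_ENABLE(INHIBIT_SYN_CTX_SWITCH |
					       ENGINE_CTX_RESTORE_INHIBIT);

		printf("masked write value: 0x%08x\n", v);	/* prints 0x00090009 */
		return 0;
	}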


@ -227,7 +227,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (vm->flags & XE_VM_FLAG_64K && level == 1)
flags = XE_PDE_64K;
entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (level - 1) *
entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
XE_PAGE_SIZE, pat_index);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
entry | flags);
@ -235,7 +235,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
/* Write PDE's that point to our BO. */
for (i = 0; i < num_entries - num_level; i++) {
entry = vm->pt_ops->pde_encode_bo(bo, i * XE_PAGE_SIZE,
entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
pat_index);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
@ -291,7 +291,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
#define VM_SA_UPDATE_UNIT_SIZE (XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
#define NUM_VMUSA_WRITES_PER_UNIT (VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
drm_suballoc_manager_init(&m->vm_update_sa,
(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
(size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
NUM_VMUSA_UNIT_PER_PAGE, 0);
m->pt_bo = bo;
@ -490,7 +490,7 @@ static void emit_pte(struct xe_migrate *m,
struct xe_vm *vm = m->q->vm;
u16 pat_index;
u32 ptes;
u64 ofs = at_pt * XE_PAGE_SIZE;
u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
u64 cur_ofs;
/* Indirect access needs compression enabled uncached PAT index */


@ -351,11 +351,6 @@ static int host1x_device_uevent(const struct device *dev,
return 0;
}
static int host1x_dma_configure(struct device *dev)
{
return of_dma_configure(dev, dev->of_node, true);
}
static const struct dev_pm_ops host1x_device_pm_ops = {
.suspend = pm_generic_suspend,
.resume = pm_generic_resume,
@ -369,7 +364,6 @@ const struct bus_type host1x_bus_type = {
.name = "host1x",
.match = host1x_device_match,
.uevent = host1x_device_uevent,
.dma_configure = host1x_dma_configure,
.pm = &host1x_device_pm_ops,
};
@ -458,8 +452,6 @@ static int host1x_device_add(struct host1x *host1x,
device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev;
of_dma_configure(&device->dev, host1x->dev->of_node, true);
device->dev.dma_parms = &device->dma_parms;
dma_set_max_seg_size(&device->dev, UINT_MAX);


@ -682,4 +682,11 @@ static inline bool dma_fence_is_container(struct dma_fence *fence)
return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
}
#define DMA_FENCE_WARN(f, fmt, args...) \
do { \
struct dma_fence *__ff = (f); \
pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
##args); \
} while (0)
#endif /* __LINUX_DMA_FENCE_H */