Merge tag 'amd-drm-fixes-6.4-2023-06-14' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.4-2023-06-14:

amdgpu:
- GFX9 preemption fixes
- Add missing radeon secondary PCI ID
- vblflash fixes
- SMU 13 fix
- VCN 4.0 fix
- Re-enable TOPDOWN flag for large BAR systems to fix regression
- eDP fix
- PSR hang fix
- DPIA fix

radeon:
- fbdev client warning fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230615024011.7773-1-alexander.deucher@amd.com
Committed by Dave Airlie on 2023-06-16 09:08:13 +10:00 (commit c8ac109e21)
13 changed files with 256 additions and 20 deletions

View File

@@ -1615,6 +1615,7 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
0x5874,
0x5940,
0x5941,
+0x5b70,
0x5b72,
0x5b73,
0x5b74,

View File

@@ -140,7 +140,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
places[c].lpfn = visible_pfn;
-else if (adev->gmc.real_vram_size != adev->gmc.visible_vram_size)
+else
places[c].flags |= TTM_PL_FLAG_TOPDOWN;
if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)

View File

@@ -3548,6 +3548,9 @@ static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
void *fw_pri_cpu_addr;
int ret;
+if (adev->psp.vbflash_image_size == 0)
+return -EINVAL;
dev_info(adev->dev, "VBIOS flash to PSP started");
ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
@@ -3599,13 +3602,13 @@ static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
}
static const struct bin_attribute psp_vbflash_bin_attr = {
-.attr = {.name = "psp_vbflash", .mode = 0664},
+.attr = {.name = "psp_vbflash", .mode = 0660},
.size = 0,
.write = amdgpu_psp_vbflash_write,
.read = amdgpu_psp_vbflash_read,
};
-static DEVICE_ATTR(psp_vbflash_status, 0444, amdgpu_psp_vbflash_status, NULL);
+static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
int amdgpu_psp_sysfs_init(struct amdgpu_device *adev)
{

View File

@@ -581,3 +581,21 @@ void amdgpu_ring_ib_end(struct amdgpu_ring *ring)
if (ring->is_sw_ring)
amdgpu_sw_ring_ib_end(ring);
}
+
+void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring)
+{
+if (ring->is_sw_ring)
+amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CONTROL);
+}
+
+void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring)
+{
+if (ring->is_sw_ring)
+amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CE);
+}
+
+void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
+{
+if (ring->is_sw_ring)
+amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
+}

View File

@@ -227,6 +227,9 @@ struct amdgpu_ring_funcs {
int (*preempt_ib)(struct amdgpu_ring *ring);
void (*emit_mem_sync)(struct amdgpu_ring *ring);
void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
+void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
+void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
+void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
};
struct amdgpu_ring {
@@ -318,10 +321,16 @@ struct amdgpu_ring {
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
#define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)
+#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
+#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
+#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_ib_begin(struct amdgpu_ring *ring);
void amdgpu_ring_ib_end(struct amdgpu_ring *ring);
+void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring);
+void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring);
+void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);

View File

@@ -105,6 +105,16 @@ static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux)
amdgpu_fence_update_start_timestamp(e->ring,
chunk->sync_seq,
ktime_get());
+if (chunk->sync_seq ==
+le32_to_cpu(*(e->ring->fence_drv.cpu_addr + 2))) {
+if (chunk->cntl_offset <= e->ring->buf_mask)
+amdgpu_ring_patch_cntl(e->ring,
+chunk->cntl_offset);
+if (chunk->ce_offset <= e->ring->buf_mask)
+amdgpu_ring_patch_ce(e->ring, chunk->ce_offset);
+if (chunk->de_offset <= e->ring->buf_mask)
+amdgpu_ring_patch_de(e->ring, chunk->de_offset);
+}
amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring,
chunk->start,
chunk->end);
@@ -407,6 +417,17 @@ void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring)
amdgpu_ring_mux_end_ib(mux, ring);
}
+
+void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type)
+{
+struct amdgpu_device *adev = ring->adev;
+struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
+unsigned offset;
+
+offset = ring->wptr & ring->buf_mask;
+
+amdgpu_ring_mux_ib_mark_offset(mux, ring, offset, type);
+}
void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
struct amdgpu_mux_entry *e;
@@ -429,6 +450,10 @@ void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
}
chunk->start = ring->wptr;
+/* the initialized value used to check if they are set by the ib submission*/
+chunk->cntl_offset = ring->buf_mask + 1;
+chunk->de_offset = ring->buf_mask + 1;
+chunk->ce_offset = ring->buf_mask + 1;
list_add_tail(&chunk->entry, &e->list);
}
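The buf_mask + 1 initializer above is a sentinel: every offset the IB path records is masked with ring->buf_mask, so a real offset can never exceed buf_mask, and the resubmit path earlier in this file treats chunk->*_offset <= e->ring->buf_mask as "offset was recorded". A minimal standalone sketch of the pattern in C (illustrative names, not the kernel code):

    #include <stdbool.h>
    #include <stdint.h>

    #define RING_ENTRIES 256u                /* power of two, as for GPU rings */
    #define BUF_MASK     (RING_ENTRIES - 1u)
    #define OFFSET_UNSET (BUF_MASK + 1u)     /* unreachable once masked */

    struct chunk {
        uint64_t cntl_offset;                /* initialized to OFFSET_UNSET */
    };

    static void record_offset(struct chunk *c, uint64_t wptr)
    {
        c->cntl_offset = wptr & BUF_MASK;    /* always <= BUF_MASK */
    }

    static bool offset_recorded(const struct chunk *c)
    {
        return c->cntl_offset <= BUF_MASK;   /* the sentinel fails this test */
    }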
@@ -454,6 +479,41 @@ static void scan_and_remove_signaled_chunk(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
}
}
+
+void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux,
+struct amdgpu_ring *ring, u64 offset,
+enum amdgpu_ring_mux_offset_type type)
+{
+struct amdgpu_mux_entry *e;
+struct amdgpu_mux_chunk *chunk;
+
+e = amdgpu_ring_mux_sw_entry(mux, ring);
+if (!e) {
+DRM_ERROR("cannot find entry!\n");
+return;
+}
+
+chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry);
+if (!chunk) {
+DRM_ERROR("cannot find chunk!\n");
+return;
+}
+
+switch (type) {
+case AMDGPU_MUX_OFFSET_TYPE_CONTROL:
+chunk->cntl_offset = offset;
+break;
+case AMDGPU_MUX_OFFSET_TYPE_DE:
+chunk->de_offset = offset;
+break;
+case AMDGPU_MUX_OFFSET_TYPE_CE:
+chunk->ce_offset = offset;
+break;
+default:
+DRM_ERROR("invalid type (%d)\n", type);
+break;
+}
+}
void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
struct amdgpu_mux_entry *e;

View File

@@ -50,6 +50,12 @@ struct amdgpu_mux_entry {
struct list_head list;
};
+
+enum amdgpu_ring_mux_offset_type {
+AMDGPU_MUX_OFFSET_TYPE_CONTROL,
+AMDGPU_MUX_OFFSET_TYPE_DE,
+AMDGPU_MUX_OFFSET_TYPE_CE,
+};
struct amdgpu_ring_mux {
struct amdgpu_ring *real_ring;
@@ -72,12 +78,18 @@ struct amdgpu_ring_mux {
* @sync_seq: the fence seqno related with the saved IB.
* @start:- start location on the software ring.
* @end:- end location on the software ring.
+* @cntl_offset:- the PRE_RESUME bit position used for resubmission.
+* @de_offset:- the anchor in write_data for de meta of resubmission.
+* @ce_offset:- the anchor in write_data for ce meta of resubmission.
*/
struct amdgpu_mux_chunk {
struct list_head entry;
uint32_t sync_seq;
u64 start;
u64 end;
+u64 cntl_offset;
+u64 de_offset;
+u64 ce_offset;
};
int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
@@ -89,6 +101,8 @@ u64 amdgpu_ring_mux_get_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring);
u64 amdgpu_ring_mux_get_rptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring);
void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring);
void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring);
+void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
+u64 offset, enum amdgpu_ring_mux_offset_type type);
bool amdgpu_mcbp_handle_trailing_fence_irq(struct amdgpu_ring_mux *mux);
u64 amdgpu_sw_ring_get_rptr_gfx(struct amdgpu_ring *ring);
@@ -97,6 +111,7 @@ void amdgpu_sw_ring_set_wptr_gfx(struct amdgpu_ring *ring);
void amdgpu_sw_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring);
void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring);
+void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type);
const char *amdgpu_sw_ring_name(int idx);
unsigned int amdgpu_sw_ring_priority(int idx);

View File

@@ -755,7 +755,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
+static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
@@ -5127,7 +5127,8 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
gfx_v9_0_ring_emit_de_meta(ring,
(!amdgpu_sriov_vf(ring->adev) &&
flags & AMDGPU_IB_PREEMPTED) ?
-true : false);
+true : false,
+job->gds_size > 0 && job->gds_base != 0);
}
amdgpu_ring_write(ring, header);
@@ -5138,9 +5139,83 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
#endif
lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+
+amdgpu_ring_ib_on_emit_cntl(ring);
amdgpu_ring_write(ring, control);
}
+
+static void gfx_v9_0_ring_patch_cntl(struct amdgpu_ring *ring,
+unsigned offset)
+{
+u32 control = ring->ring[offset];
+
+control |= INDIRECT_BUFFER_PRE_RESUME(1);
+ring->ring[offset] = control;
+}
+
+static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring,
+unsigned offset)
+{
+struct amdgpu_device *adev = ring->adev;
+void *ce_payload_cpu_addr;
+uint64_t payload_offset, payload_size;
+
+payload_size = sizeof(struct v9_ce_ib_state);
+
+if (ring->is_mes_queue) {
+payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
+gfx[0].gfx_meta_data) +
+offsetof(struct v9_gfx_meta_data, ce_payload);
+ce_payload_cpu_addr =
+amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
+} else {
+payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
+ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
+}
+
+if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
+memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size);
+} else {
+memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr,
+(ring->buf_mask + 1 - offset) << 2);
+payload_size -= (ring->buf_mask + 1 - offset) << 2;
+memcpy((void *)&ring->ring[0],
+ce_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
+payload_size);
+}
+}
+
+static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring,
+unsigned offset)
+{
+struct amdgpu_device *adev = ring->adev;
+void *de_payload_cpu_addr;
+uint64_t payload_offset, payload_size;
+
+payload_size = sizeof(struct v9_de_ib_state);
+
+if (ring->is_mes_queue) {
+payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
+gfx[0].gfx_meta_data) +
+offsetof(struct v9_gfx_meta_data, de_payload);
+de_payload_cpu_addr =
+amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
+} else {
+payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
+de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
+}
+
+if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
+memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, payload_size);
+} else {
+memcpy((void *)&ring->ring[offset], de_payload_cpu_addr,
+(ring->buf_mask + 1 - offset) << 2);
+payload_size -= (ring->buf_mask + 1 - offset) << 2;
+memcpy((void *)&ring->ring[0],
+de_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
+payload_size);
+}
+}
static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
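The split memcpy in both patch functions above is ring-buffer wrap-around arithmetic: ring->buf_mask + 1 is the ring size in u32 entries, so a payload that would run past the end is copied in two pieces, first up to the end of the ring, then the remainder at index 0. The same logic in isolation (a sketch with illustrative names, assuming u32 ring entries as in the code above):

    #include <stdint.h>
    #include <string.h>

    static void ring_copy_wrapped(uint32_t *ring, uint32_t buf_mask,
                                  unsigned int offset, const void *src,
                                  size_t bytes)
    {
        /* bytes available between offset and the end of the ring */
        size_t bytes_to_end = ((size_t)buf_mask + 1 - offset) * sizeof(uint32_t);

        if (bytes <= bytes_to_end) {
            memcpy(&ring[offset], src, bytes);         /* fits without wrapping */
        } else {
            memcpy(&ring[offset], src, bytes_to_end);  /* tail of the ring */
            memcpy(&ring[0], (const uint8_t *)src + bytes_to_end,
                   bytes - bytes_to_end);              /* remainder at the start */
        }
    }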
@@ -5336,6 +5411,8 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
amdgpu_ring_write(ring, lower_32_bits(ce_payload_gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ce_payload_gpu_addr));
+
+amdgpu_ring_ib_on_emit_ce(ring);
if (resume)
amdgpu_ring_write_multiple(ring, ce_payload_cpu_addr,
sizeof(ce_payload) >> 2);
@@ -5369,10 +5446,6 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
amdgpu_ring_alloc(ring, 13);
gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC | AMDGPU_FENCE_FLAG_INT);
-/*reset the CP_VMID_PREEMPT after trailing fence*/
-amdgpu_ring_emit_wreg(ring,
-SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT),
-0x0);
/* assert IB preemption, emit the trailing fence */
kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
@@ -5395,6 +5468,10 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
DRM_WARN("ring %d timeout to preempt ib\n", ring->idx);
}
+/*reset the CP_VMID_PREEMPT after trailing fence*/
+amdgpu_ring_emit_wreg(ring,
+SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT),
+0x0);
amdgpu_ring_commit(ring);
/* deassert preemption condition */
@@ -5402,7 +5479,7 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
return r;
}
-static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
+static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds)
{
struct amdgpu_device *adev = ring->adev;
struct v9_de_ib_state de_payload = {0};
@@ -5433,8 +5510,10 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
PAGE_SIZE);
}
-de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
-de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
+if (usegds) {
+de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
+de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
+}
cnt = (sizeof(de_payload) >> 2) + 4 - 2;
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
@@ -5445,6 +5524,7 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
+amdgpu_ring_ib_on_emit_de(ring);
if (resume)
amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
sizeof(de_payload) >> 2);
@@ -6855,6 +6935,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
+.patch_cntl = gfx_v9_0_ring_patch_cntl,
+.patch_de = gfx_v9_0_ring_patch_de_meta,
+.patch_ce = gfx_v9_0_ring_patch_ce_meta,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {

View File

@@ -129,7 +129,11 @@ static int vcn_v4_0_sw_init(void *handle)
if (adev->vcn.harvest_config & (1 << i))
continue;
-atomic_set(&adev->vcn.inst[i].sched_score, 0);
+/* Init instance 0 sched_score to 1, so it's scheduled after other instances */
+if (i == 0)
+atomic_set(&adev->vcn.inst[i].sched_score, 1);
+else
+atomic_set(&adev->vcn.inst[i].sched_score, 0);
/* VCN UNIFIED TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
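The bias works because the scheduler places new jobs on the instance with the lowest score, and on VCN 4.0 instance 0 is the only one that handles certain workloads (notably AV1), so starting it at 1 steers general jobs toward the other instances while everything is idle. A hedged sketch of that selection idea (illustrative only, not the drm scheduler's actual code):

    #include <limits.h>

    /* pick the least-loaded instance; with all instances idle, instance 0's
     * initial score of 1 loses to any other instance's 0 */
    static int pick_vcn_instance(const int *sched_score, int num_instances)
    {
        int best = 0, best_score = INT_MAX;

        for (int i = 0; i < num_instances; i++) {
            if (sched_score[i] < best_score) {
                best_score = sched_score[i];
                best = i;
            }
        }
        return best;
    }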

View File

@@ -7196,7 +7196,13 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
drm_add_modes_noedid(connector, 1920, 1080);
} else {
amdgpu_dm_connector_ddc_get_modes(connector, edid);
-amdgpu_dm_connector_add_common_modes(encoder, connector);
+/* most eDP supports only timings from its edid,
+ * usually only detailed timings are available
+ * from eDP edid. timings which are not from edid
+ * may damage eDP
+ */
+if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+amdgpu_dm_connector_add_common_modes(encoder, connector);
amdgpu_dm_connector_add_freesync_modes(connector, edid);
}
amdgpu_dm_fbc_init(connector);
@@ -8198,6 +8204,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
bundle->stream_update.abm_level = &acrtc_state->abm_level;
+mutex_lock(&dm->dc_lock);
+if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+acrtc_state->stream->link->psr_settings.psr_allow_active)
+amdgpu_dm_psr_disable(acrtc_state->stream);
+mutex_unlock(&dm->dc_lock);
/*
* If FreeSync state on the stream has changed then we need to
* re-adjust the min/max bounds now that DC doesn't handle this
@@ -8211,10 +8223,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
mutex_lock(&dm->dc_lock);
-if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
-acrtc_state->stream->link->psr_settings.psr_allow_active)
-amdgpu_dm_psr_disable(acrtc_state->stream);
update_planes_and_stream_adapter(dm->dc,
acrtc_state->update_type,
planes_count,

View File

@@ -980,6 +980,11 @@ static bool detect_link_and_local_sink(struct dc_link *link,
(link->dpcd_caps.dongle_type !=
DISPLAY_DONGLE_DP_HDMI_CONVERTER))
converter_disable_audio = true;
+
+/* limited link rate to HBR3 for DPIA until we implement USB4 V2 */
+if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+link->reported_link_cap.link_rate > LINK_RATE_HIGH3)
+link->reported_link_cap.link_rate = LINK_RATE_HIGH3;
break;
}

View File

@@ -1696,10 +1696,39 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
}
}
-/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-workload_type = smu_cmn_to_asic_specific_index(smu,
+if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
+(((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) ||
+((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) {
+ret = smu_cmn_update_table(smu,
+SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+WORKLOAD_PPLIB_COMPUTE_BIT,
+(void *)(&activity_monitor_external),
+false);
+if (ret) {
+dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+return ret;
+}
+
+ret = smu_cmn_update_table(smu,
+SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+WORKLOAD_PPLIB_CUSTOM_BIT,
+(void *)(&activity_monitor_external),
+true);
+if (ret) {
+dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+return ret;
+}
+
+workload_type = smu_cmn_to_asic_specific_index(smu,
+CMN2ASIC_MAPPING_WORKLOAD,
+PP_SMC_POWER_PROFILE_CUSTOM);
+} else {
+/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
smu->power_profile_mode);
+}
if (workload_type < 0)
return -EINVAL;
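Note the shape of the workaround above: on the affected SKUs it reads the stock COMPUTE activity-monitor coefficients, writes them into the CUSTOM slot, and then reports the CUSTOM workload index to the firmware. The SKU check spells out the device ID in both branches; an equivalent, factored form of the predicate (a sketch, not a proposed driver change):

    #include <stdbool.h>
    #include <stdint.h>

    /* same condition as the hunk: device 0x744C, revision 0xC8 or 0xCC */
    static bool needs_compute_workload_quirk(uint16_t device, uint8_t revision)
    {
        return device == 0x744C && (revision == 0xC8 || revision == 0xCC);
    }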

View File

@@ -307,6 +307,7 @@ static void radeon_fbdev_client_unregister(struct drm_client_dev *client)
if (fb_helper->info) {
vga_switcheroo_client_fb_set(rdev->pdev, NULL);
+drm_helper_force_disable_all(dev);
drm_fb_helper_unregister_info(fb_helper);
} else {
drm_client_release(&fb_helper->client);