Merge tag 'amd-drm-fixes-5.11-2021-02-03' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-5.11-2021-02-03:

amdgpu:
- Fix retry in gem create
- Vangogh fixes
- Fix for display from shared buffers
- Various display fixes

amdkfd:
- Fix regression in buffer free

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210204041300.4425-1-alexander.deucher@amd.com
commit cfd4951f93
Committed by Dave Airlie on 2021-02-05 09:29:15 +10:00
12 changed files with 94 additions and 58 deletions

drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c

@@ -26,6 +26,7 @@
#include <linux/sched/task.h>
#include "amdgpu_object.h"
#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
@@ -1152,7 +1153,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
struct drm_gem_object *gobj;
u32 domain, alloc_domain;
u64 alloc_flags;
int ret;
@@ -1220,19 +1221,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
va, size, domain_string(alloc_domain));
memset(&bp, 0, sizeof(bp));
bp.size = size;
bp.byte_align = 1;
bp.domain = alloc_domain;
bp.flags = alloc_flags;
bp.type = bo_type;
bp.resv = NULL;
ret = amdgpu_bo_create(adev, &bp, &bo);
ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
bo_type, NULL, &gobj);
if (ret) {
pr_debug("Failed to create BO on domain %s. ret %d\n",
domain_string(alloc_domain), ret);
domain_string(alloc_domain), ret);
goto err_bo_create;
}
bo = gem_to_amdgpu_bo(gobj);
if (bo_type == ttm_bo_type_sg) {
bo->tbo.sg = sg;
bo->tbo.ttm->sg = sg;
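
For context on the hunk above: the KFD allocation now funnels through the common GEM helper instead of filling a struct amdgpu_bo_param by hand, so the resulting BO carries a drm_gem_object and can be shared with the GEM/display paths. A minimal sketch of the equivalence (not the exact driver code):

	/*
	 * Removed amdgpu_bo_param fields -> amdgpu_gem_object_create() arguments:
	 *   bp.size       -> size
	 *   bp.byte_align -> 1
	 *   bp.domain     -> alloc_domain
	 *   bp.flags      -> alloc_flags
	 *   bp.type       -> bo_type
	 *   bp.resv       -> NULL
	 */
	ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
				       bo_type, NULL, &gobj);
	if (!ret)
		bo = gem_to_amdgpu_bo(gobj);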

drivers/gpu/drm/amd/amdgpu/amdgpu_display.c

@@ -926,8 +926,10 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
struct amdgpu_framebuffer *amdgpu_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *bo;
uint32_t domains;
int ret;
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
@@ -938,7 +940,9 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
}
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
if (obj->import_attach) {
bo = gem_to_amdgpu_bo(obj);
domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
return ERR_PTR(-EINVAL);
}
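
The framebuffer-create path now rejects an imported dma-buf only when the buffer cannot scan out from GTT; imported buffers can never be migrated to VRAM, but GTT scanout is fine where the ASIC supports it. A commented reading of the new check, reusing the helpers shown in the hunk:

	bo = gem_to_amdgpu_bo(obj);
	/* which domains can this BO be displayed from on this ASIC? */
	domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
	/* refuse the import only when GTT scanout is not among them */
	if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
		drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
		return ERR_PTR(-EINVAL);
	}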

drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c

@@ -269,8 +269,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
resv = vm->root.base.bo->tbo.base.resv;
}
retry:
initial_domain = (u32)(0xffffffff & args->in.domains);
retry:
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
initial_domain,
flags, ttm_bo_type_device, resv, &gobj);
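
The hunk above only moves the retry label: initial_domain is now set once, before the label, instead of on every pass. That matters because the failure path adjusts the domain and jumps back to retry; with the assignment inside the loop that adjustment was immediately overwritten by args->in.domains. A hedged sketch of the intended flow (the fallback condition shown here is illustrative, not the exact driver logic):

	initial_domain = (u32)(0xffffffff & args->in.domains);
retry:
	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     initial_domain, flags, ttm_bo_type_device,
				     resv, &gobj);
	if (r == -ENOMEM && (initial_domain & AMDGPU_GEM_DOMAIN_VRAM)) {
		/* illustrative fallback: retry the allocation from GTT */
		initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
		goto retry;
	}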

drivers/gpu/drm/amd/amdgpu/amdgpu_object.c

@@ -897,7 +897,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;
/* A shared bo cannot be migrated to VRAM */
if (bo->prime_shared_count) {
if (bo->prime_shared_count || bo->tbo.base.import_attach) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else
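
The pin-domain hunk is cut off at the else branch by the diff context, but it pairs with the display change above: a BO that is prime-shared or backed by an import attachment must not be pinned into VRAM, so the request is narrowed to GTT when GTT was allowed and refused otherwise. A sketch of that logic, assuming the usual -EINVAL rejection:

	/* a shared or imported BO cannot be migrated to VRAM */
	if (bo->prime_shared_count || bo->tbo.base.import_attach) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;	/* pin in GTT instead */
		else
			return -EINVAL;			/* VRAM-only request: refuse */
	}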

drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c

@@ -99,6 +99,10 @@
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0
#define mmCGTS_TCC_DISABLE_Vangogh 0x5006
#define mmCGTS_TCC_DISABLE_Vangogh_BASE_IDX 1
#define mmCGTS_USER_TCC_DISABLE_Vangogh 0x5007
#define mmCGTS_USER_TCC_DISABLE_Vangogh_BASE_IDX 1
#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh 0x0025
#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1
#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026
@@ -4936,8 +4940,18 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
{
/* TCCs are global (not instanced). */
uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
uint32_t tcc_disable;
switch (adev->asic_type) {
case CHIP_VANGOGH:
tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_Vangogh) |
RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_Vangogh);
break;
default:
tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
break;
}
adev->gfx.config.tcc_disabled_mask =
REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

@@ -1833,8 +1833,8 @@ static void emulated_link_detect(struct dc_link *link)
link->type = dc_connection_none;
prev_sink = link->local_sink;
if (prev_sink != NULL)
dc_sink_retain(prev_sink);
if (prev_sink)
dc_sink_release(prev_sink);
switch (link->connector_signal) {
case SIGNAL_TYPE_HDMI_TYPE_A: {
@@ -1934,7 +1934,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
dc_commit_updates_for_stream(
dm->dc, bundle->surface_updates,
dc_state->stream_status->plane_count,
dc_state->streams[k], &bundle->stream_update, dc_state);
dc_state->streams[k], &bundle->stream_update);
}
cleanup:
@@ -1965,8 +1965,7 @@ static void dm_set_dpms_off(struct dc_link *link)
stream_update.stream = stream_state;
dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
stream_state, &stream_update,
stream_state->ctx->dc->current_state);
stream_state, &stream_update);
mutex_unlock(&adev->dm.dc_lock);
}
@@ -2330,8 +2329,10 @@ void amdgpu_dm_update_connector_after_detect(
* TODO: check if we still need the S3 mode update workaround.
* If yes, put it here.
*/
if (aconnector->dc_sink)
if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(connector, NULL);
dc_sink_release(aconnector->dc_sink);
}
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
@@ -2347,8 +2348,6 @@ void amdgpu_dm_update_connector_after_detect(
drm_connector_update_edid_property(connector,
aconnector->edid);
drm_add_edid_modes(connector, aconnector->edid);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
aconnector->edid);
@@ -7549,7 +7548,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_crtc *pcrtc,
bool wait_for_vblank)
{
uint32_t i;
int i;
uint64_t timestamp_ns;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
@@ -7590,7 +7589,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
amdgpu_dm_commit_cursors(state);
/* update planes when needed */
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state;
struct drm_framebuffer *fb = new_plane_state->fb;
@@ -7813,8 +7812,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates,
planes_count,
acrtc_state->stream,
&bundle->stream_update,
dc_state);
&bundle->stream_update);
/**
* Enable or disable the interrupts on the backend.
@@ -8150,13 +8148,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
struct dc_surface_update dummy_updates[MAX_SURFACES];
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_stream_update stream_update;
struct dc_info_packet hdr_packet;
struct dc_stream_status *status = NULL;
bool abm_changed, hdr_changed, scaling_changed;
memset(&dummy_updates, 0, sizeof(dummy_updates));
memset(&surface_updates, 0, sizeof(surface_updates));
memset(&stream_update, 0, sizeof(stream_update));
if (acrtc) {
@@ -8213,16 +8211,15 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* To fix this, DC should permit updating only stream properties.
*/
for (j = 0; j < status->plane_count; j++)
dummy_updates[j].surface = status->plane_states[0];
surface_updates[j].surface = status->plane_states[j];
mutex_lock(&dm->dc_lock);
dc_commit_updates_for_stream(dm->dc,
dummy_updates,
surface_updates,
status->plane_count,
dm_new_crtc_state->stream,
&stream_update,
dc_state);
&stream_update);
mutex_unlock(&dm->dc_lock);
}
@@ -8359,14 +8356,14 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
ret = PTR_ERR_OR_ZERO(conn_state);
if (ret)
goto err;
goto out;
/* Attach crtc to drm_atomic_state*/
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
ret = PTR_ERR_OR_ZERO(crtc_state);
if (ret)
goto err;
goto out;
/* force a restore */
crtc_state->mode_changed = true;
@@ -8376,17 +8373,15 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
ret = PTR_ERR_OR_ZERO(plane_state);
if (ret)
goto err;
goto out;
/* Call commit internally with the state we just constructed */
ret = drm_atomic_commit(state);
if (!ret)
return 0;
err:
DRM_ERROR("Restoring old state failed with %i\n", ret);
out:
drm_atomic_state_put(state);
if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);
return ret;
}
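
The dm_force_atomic_commit() rework above is a standard single-exit cleanup: every failure and the success path fall through one label, drm_atomic_state_put() runs exactly once, and the error message is printed only when something actually failed. A condensed sketch of the resulting shape:

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto out;			/* all failures funnel here */

	/* ... build the rest of the state ... */

	ret = drm_atomic_commit(state);
out:
	drm_atomic_state_put(state);		/* dropped exactly once, on every path */
	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);
	return ret;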

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c

@@ -833,6 +833,9 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (computed_streams[i])
continue;
if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
return false;
mutex_lock(&aconnector->mst_mgr.lock);
if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
mutex_unlock(&aconnector->mst_mgr.lock);
@@ -850,7 +853,8 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
stream = dc_state->streams[i];
if (stream->timing.flags.DSC == 1)
dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream);
if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
return false;
}
return true;

drivers/gpu/drm/amd/display/dc/core/dc.c

@@ -2679,8 +2679,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
struct dc_state *state)
struct dc_stream_update *stream_update)
{
const struct dc_stream_status *stream_status;
enum surface_update_type update_type;
@@ -2699,6 +2698,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (update_type >= UPDATE_TYPE_FULL) {
struct dc_plane_state *new_planes[MAX_SURFACES];
memset(new_planes, 0, sizeof(new_planes));
for (i = 0; i < surface_count; i++)
new_planes[i] = srf_updates[i].surface;
/* initialize scratch memory for building context */
context = dc_create_state(dc);
@@ -2707,15 +2712,21 @@ void dc_commit_updates_for_stream(struct dc *dc,
return;
}
dc_resource_state_copy_construct(state, context);
dc_resource_state_copy_construct(
dc->current_state, context);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
new_pipe->plane_state->force_full_update = true;
/*remove old surfaces from context */
if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
DC_ERROR("Failed to remove streams for new validate context!\n");
return;
}
/* add surface to context */
if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
DC_ERROR("Failed to add streams for new validate context!\n");
return;
}
}
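
With the dc_state argument removed from dc_commit_updates_for_stream(), the full-update path has to derive its own scratch context. The hunk above does that in three steps, roughly: snapshot dc->current_state, strip the stream's old planes, then attach the planes referenced by the surface updates. A condensed sketch using the same helpers (error handling and DC_ERROR reporting trimmed):

	/* scratch context seeded from the current hardware state */
	context = dc_create_state(dc);
	dc_resource_state_copy_construct(dc->current_state, context);

	/* drop the stream's existing planes from the scratch context ... */
	dc_rem_all_planes_for_stream(dc, stream, context);
	/* ... and add the planes carried in srf_updates (collected in new_planes[]) */
	dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context);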

drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c

@@ -892,14 +892,14 @@ static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval)
switch (dpcd_aux_read_interval) {
case 0x01:
aux_rd_interval_us = 400;
break;
case 0x02:
aux_rd_interval_us = 4000;
break;
case 0x03:
case 0x02:
aux_rd_interval_us = 8000;
break;
case 0x03:
aux_rd_interval_us = 12000;
break;
case 0x04:
aux_rd_interval_us = 16000;
break;
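
The retabulated switch above appears to follow the DPCD TRAINING_AUX_RD_INTERVAL encoding, where values 0x01 through 0x04 request 4, 8, 12 and 16 ms between link-training AUX reads, and other values keep the function's default interval. A compact sketch of that mapping (the helper name here is made up for illustration):

	static uint32_t aux_rd_interval_us_from_dpcd(uint32_t dpcd_val,
						     uint32_t default_us)
	{
		/* 0x01..0x04 -> 4000, 8000, 12000, 16000 microseconds */
		if (dpcd_val >= 0x01 && dpcd_val <= 0x04)
			return dpcd_val * 4000;
		return default_us;
	}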

drivers/gpu/drm/amd/display/dc/dc_stream.h

@@ -283,8 +283,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
struct dc_state *state);
struct dc_stream_update *stream_update);
/*
* Log the current stream state.
*/

drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c

@@ -906,6 +906,8 @@ enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
DCN20_CLK_SRC_PLL2,
DCN20_CLK_SRC_PLL3,
DCN20_CLK_SRC_PLL4,
DCN20_CLK_SRC_TOTAL_DCN21
};
@@ -2030,6 +2032,14 @@ static bool dcn21_resource_construct(
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL3,
&clk_src_regs[3], false);
pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL4,
&clk_src_regs[4], false);
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;

drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c

@@ -591,14 +591,17 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
gpu_metrics->average_cpu_power = metrics.Power[0];
gpu_metrics->average_soc_power = metrics.Power[1];
gpu_metrics->average_gfx_power = metrics.Power[2];
memcpy(&gpu_metrics->average_core_power[0],
&metrics.CorePower[0],
sizeof(uint16_t) * 8);
gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
memcpy(&gpu_metrics->current_coreclk[0],
&metrics.CoreFrequency[0],