drm/amdgpu: move UVD and VCE sched entity init after sched init

We need kernel scheduling entities to deal with handle cleanup
if apps are not cleaned up properly. With commit 56e449603f
("drm/sched: Convert the GPU scheduler to variable number of run-queues")
the scheduler entities have to be created after scheduler init, so
change the ordering to fix this.

v2: Leave logic in UVD and VCE code

Fixes: 56e449603f ("drm/sched: Convert the GPU scheduler to variable number of run-queues")
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Luben Tuikov <ltuikov89@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: ltuikov89@gmail.com
This commit is contained in:
Alex Deucher 2023-11-08 09:40:44 -05:00
parent 8ed79c409e
commit 037b98a231
13 changed files with 36 additions and 45 deletions

View File

@@ -2584,6 +2584,18 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
ring->name);
return r;
}
r = amdgpu_uvd_entity_init(adev, ring);
if (r) {
DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
ring->name);
return r;
}
r = amdgpu_vce_entity_init(adev, ring);
if (r) {
DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
ring->name);
return r;
}
}
amdgpu_xcp_update_partition_sched_list(adev);

View File

@@ -399,20 +399,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
*
* @adev: amdgpu_device pointer
*
* Initialize the entity used for handle management in the kernel driver.
*/
int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
int amdgpu_uvd_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
struct amdgpu_ring *ring;
struct drm_gpu_scheduler *sched;
int r;
if (ring == &adev->uvd.inst[0].ring) {
struct drm_gpu_scheduler *sched = &ring->sched;
int r;
ring = &adev->uvd.inst[0].ring;
sched = &ring->sched;
r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
&sched, 1, NULL);
if (r) {
DRM_ERROR("Failed setting up UVD kernel entity.\n");
return r;
r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
&sched, 1, NULL);
if (r) {
DRM_ERROR("Failed setting up UVD kernel entity.\n");
return r;
}
}
return 0;

View File

@@ -73,7 +73,7 @@ struct amdgpu_uvd {
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
int amdgpu_uvd_entity_init(struct amdgpu_device *adev);
int amdgpu_uvd_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
int amdgpu_uvd_prepare_suspend(struct amdgpu_device *adev);
int amdgpu_uvd_suspend(struct amdgpu_device *adev);
int amdgpu_uvd_resume(struct amdgpu_device *adev);

View File

@@ -231,20 +231,20 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
*
* @adev: amdgpu_device pointer
*
* Initialize the entity used for handle management in the kernel driver.
*/
int amdgpu_vce_entity_init(struct amdgpu_device *adev)
int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
struct amdgpu_ring *ring;
struct drm_gpu_scheduler *sched;
int r;
if (ring == &adev->vce.ring[0]) {
struct drm_gpu_scheduler *sched = &ring->sched;
int r;
ring = &adev->vce.ring[0];
sched = &ring->sched;
r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
&sched, 1, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCE run queue.\n");
return r;
r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
&sched, 1, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCE run queue.\n");
return r;
}
}
return 0;

View File

@@ -55,7 +55,7 @@ struct amdgpu_vce {
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size);
int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_entity_init(struct amdgpu_device *adev);
int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);

View File

@@ -577,8 +577,6 @@ static int uvd_v3_1_sw_init(void *handle)
ptr += ucode_len;
memcpy(&adev->uvd.keyselect, ptr, 4);
r = amdgpu_uvd_entity_init(adev);
return r;
}

View File

@@ -127,8 +127,6 @@ static int uvd_v4_2_sw_init(void *handle)
if (r)
return r;
r = amdgpu_uvd_entity_init(adev);
return r;
}

View File

@@ -125,8 +125,6 @@ static int uvd_v5_0_sw_init(void *handle)
if (r)
return r;
r = amdgpu_uvd_entity_init(adev);
return r;
}

View File

@@ -432,8 +432,6 @@ static int uvd_v6_0_sw_init(void *handle)
}
}
r = amdgpu_uvd_entity_init(adev);
return r;
}

View File

@@ -480,10 +480,6 @@ static int uvd_v7_0_sw_init(void *handle)
if (r)
return r;
r = amdgpu_uvd_entity_init(adev);
if (r)
return r;
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;

View File

@@ -441,8 +441,6 @@ static int vce_v2_0_sw_init(void *handle)
return r;
}
r = amdgpu_vce_entity_init(adev);
return r;
}

View File

@@ -450,8 +450,6 @@ static int vce_v3_0_sw_init(void *handle)
return r;
}
r = amdgpu_vce_entity_init(adev);
return r;
}

View File

@@ -486,11 +486,6 @@ static int vce_v4_0_sw_init(void *handle)
return r;
}
r = amdgpu_vce_entity_init(adev);
if (r)
return r;
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;