drm/amdgpu: add gang submit backend v2

Allows submitting jobs as a gang that needs to run on multiple
engines at the same time.

The basic idea is a global gang submit fence representing the point when the
gang leader is finally pushed to run on the hardware, last of all the gang's
jobs.

Jobs submitted as a gang are never re-submitted in case of a GPU reset, since
that won't work and would just deadlock the hardware again immediately.

v2: fix logic inversion, improve documentation, fix rcu

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 68ce8b2422 (parent c05d789fed)
Authored by Christian König, 2022-03-02 16:26:53 +01:00; committed by Alex Deucher
4 files changed, 67 insertions(+), 2 deletions(-)
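The commit message above only sketches the idea, so here is a rough illustration of how a gang-aware front end (not part of this patch) could tie two jobs together with the helpers added below. push_gang_of_two(), the jobs and their entities are hypothetical, and error handling is omitted:

static void push_gang_of_two(struct amdgpu_job *leader,
			     struct amdgpu_job *member)
{
	/* Assumes both jobs were already initialized against their scheduler
	 * entities with drm_sched_job_init(); arming initializes the scheduler
	 * fences, in particular the leader's scheduled fence that
	 * amdgpu_job_set_gang_leader() takes a reference to. */
	drm_sched_job_arm(&leader->base);
	drm_sched_job_arm(&member->base);

	/* Every gang member, including the leader itself, records the
	 * leader's scheduled fence as its gang_submit fence. */
	amdgpu_job_set_gang_leader(member, leader);
	amdgpu_job_set_gang_leader(leader, leader);

	/* Push the members first and the gang leader last; the leader's
	 * scheduled fence then stands for the whole gang having reached
	 * the hardware. */
	drm_sched_entity_push_job(&member->base);
	drm_sched_entity_push_job(&leader->base);
}

A real front end additionally has to guarantee that the leader really reaches the hardware last, for example by making it depend on every member's scheduled fence.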

drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -885,6 +885,7 @@ struct amdgpu_device {
 	u64				fence_context;
 	unsigned			num_rings;
 	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
+	struct dma_fence __rcu		*gang_submit;
 	bool				ib_pool_ready;
 	struct amdgpu_sa_manager	ib_pools[AMDGPU_IB_POOL_MAX];
 	struct amdgpu_sched		gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
@@ -1294,6 +1295,8 @@ u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
 				u32 reg);
 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
 				u32 reg, u32 v);
+struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
+					    struct dma_fence *gang);
 
 /* atpx handler */
 #if defined(CONFIG_VGA_SWITCHEROO)

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -3511,6 +3511,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->gmc.gart_size = 512 * 1024 * 1024;
 	adev->accel_working = false;
 	adev->num_rings = 0;
+	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
 	adev->mman.buffer_funcs = NULL;
 	adev->mman.buffer_funcs_ring = NULL;
 	adev->vm_manager.vm_pte_funcs = NULL;
@@ -3992,6 +3993,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 	release_firmware(adev->firmware.gpu_info_fw);
 	adev->firmware.gpu_info_fw = NULL;
 	adev->accel_working = false;
+	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
 
 	amdgpu_reset_fini(adev);
@@ -5960,3 +5962,36 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
 	(void)RREG32(data);
 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 }
+
+/**
+ * amdgpu_device_switch_gang - switch to a new gang
+ * @adev: amdgpu_device pointer
+ * @gang: the gang to switch to
+ *
+ * Try to switch to a new gang.
+ * Returns: NULL if we switched to the new gang or a reference to the current
+ * gang leader.
+ */
+struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
+					    struct dma_fence *gang)
+{
+	struct dma_fence *old = NULL;
+
+	do {
+		dma_fence_put(old);
+		rcu_read_lock();
+		old = dma_fence_get_rcu_safe(&adev->gang_submit);
+		rcu_read_unlock();
+
+		if (old == gang)
+			break;
+
+		if (!dma_fence_is_signaled(old))
+			return old;
+
+	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
+			 old, gang) != old);
+
+	dma_fence_put(old);
+	return NULL;
+}
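
The only user of amdgpu_device_switch_gang() added in this patch is amdgpu_job_dependency() below: the scheduler waits on whatever fence the dependency callback returns and then asks again, which implicitly retries the switch. A hypothetical direct caller (wait_and_switch_gang() is not part of the driver) would have to spell that retry loop out:

static int wait_and_switch_gang(struct amdgpu_device *adev,
				struct dma_fence *gang)
{
	struct dma_fence *old;
	int r;

	/* While the previous gang leader hasn't been pushed to the hardware
	 * yet, amdgpu_device_switch_gang() refuses to switch and returns a
	 * reference to that leader's fence instead of NULL. */
	while ((old = amdgpu_device_switch_gang(adev, gang))) {
		r = dma_fence_wait(old, false);
		dma_fence_put(old);
		if (r)
			return r;
	}

	return 0;
}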

drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

@@ -173,11 +173,29 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 	dma_fence_put(&job->hw_fence);
 }
 
+void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
+				struct amdgpu_job *leader)
+{
+	struct dma_fence *fence = &leader->base.s_fence->scheduled;
+
+	WARN_ON(job->gang_submit);
+
+	/*
+	 * Don't add a reference when we are the gang leader to avoid circle
+	 * dependency.
+	 */
+	if (job != leader)
+		dma_fence_get(fence);
+	job->gang_submit = fence;
+}
+
 void amdgpu_job_free(struct amdgpu_job *job)
 {
 	amdgpu_job_free_resources(job);
 	amdgpu_sync_free(&job->sync);
 	amdgpu_sync_free(&job->sched_sync);
+	if (job->gang_submit != &job->base.s_fence->scheduled)
+		dma_fence_put(job->gang_submit);
 
 	if (!job->hw_fence.ops)
 		kfree(job);
@@ -247,12 +265,16 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 		fence = amdgpu_sync_get_fence(&job->sync);
 	}
 
+	if (!fence && job->gang_submit)
+		fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
+
 	return fence;
 }
 
 static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 {
 	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
+	struct amdgpu_device *adev = ring->adev;
 	struct dma_fence *fence = NULL, *finished;
 	struct amdgpu_job *job;
 	int r = 0;
@@ -264,8 +286,10 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 
 	trace_amdgpu_sched_run_job(job);
 
-	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
-		dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
+	/* Skip job if VRAM is lost and never resubmit gangs */
+	if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter) ||
+	    (job->job_run_counter && job->gang_submit))
+		dma_fence_set_error(finished, -ECANCELED);
 
 	if (finished->error < 0) {
 		DRM_INFO("Skip scheduling IBs!\n");
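
One detail from the hunks above that is easy to miss: the gang leader's gang_submit points at its own scheduled fence without holding a reference (to avoid a reference loop), which is why amdgpu_job_free() only drops the reference for gang members. A hypothetical teardown helper, written only to spell that rule out, would look like this:

static void example_job_clear_gang_leader(struct amdgpu_job *job)
{
	if (!job->gang_submit)
		return;

	/* Only gang members hold a reference; the leader points at its own
	 * scheduled fence without taking one. */
	if (job->gang_submit != &job->base.s_fence->scheduled)
		dma_fence_put(job->gang_submit);
	job->gang_submit = NULL;
}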

drivers/gpu/drm/amd/amdgpu/amdgpu_job.h

@@ -50,6 +50,7 @@ struct amdgpu_job {
 	struct amdgpu_sync	sync;
 	struct amdgpu_sync	sched_sync;
 	struct dma_fence	hw_fence;
+	struct dma_fence	*gang_submit;
 	uint32_t		preamble_status;
 	uint32_t		preemption_status;
 	bool			vm_needs_flush;
@@ -84,6 +85,8 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
 			      struct amdgpu_bo *gws, struct amdgpu_bo *oa);
 void amdgpu_job_free_resources(struct amdgpu_job *job);
+void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
+				struct amdgpu_job *leader);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 		      void *owner, struct dma_fence **f);