drm/amdgpu: split the VM entity into direct and delayed

For page fault handling we need to use a direct update which can't be
blocked by ongoing user CS.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit a2cf324785 (parent 3084cf46cf)
Authored by Christian König on 2019-07-19 14:41:12 +02:00, committed by Alex Deucher
4 changed files with 24 additions and 13 deletions
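
The intent of the split, in rough terms: the delayed entity keeps the existing behaviour of page table updates being ordered behind user command submissions, while the direct entity is there so that a page fault handler can submit an update that does not have to wait for them. The sketch below only illustrates that intent and is not code from this patch; the "immediate" flag and the helper name are assumptions for illustration.

static struct drm_sched_entity *
example_vm_update_entity(struct amdgpu_vm *vm, bool immediate)
{
        /* Page fault handling must not wait behind ongoing user CS... */
        if (immediate)
                return &vm->direct;

        /* ...everything else keeps the old, CS-ordered path. */
        return &vm->delayed;
}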

drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c

@@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
             !dma_fence_is_later(updates, (*id)->flushed_updates))
                 updates = NULL;
 
-        if ((*id)->owner != vm->entity.fence_context ||
+        if ((*id)->owner != vm->direct.fence_context ||
             job->vm_pd_addr != (*id)->pd_gpu_addr ||
             updates || !(*id)->last_flush ||
             ((*id)->last_flush->context != fence_context &&
@@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                 struct dma_fence *flushed;
 
                 /* Check all the prerequisites to using this VMID */
-                if ((*id)->owner != vm->entity.fence_context)
+                if ((*id)->owner != vm->direct.fence_context)
                         continue;
 
                 if ((*id)->pd_gpu_addr != job->vm_pd_addr)
@@ -449,7 +449,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
         }
 
         id->pd_gpu_addr = job->vm_pd_addr;
-        id->owner = vm->entity.fence_context;
+        id->owner = vm->direct.fence_context;
 
         if (job->vm_needs_flush) {
                 dma_fence_put(id->last_flush);

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -2671,12 +2671,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
         spin_lock_init(&vm->invalidated_lock);
         INIT_LIST_HEAD(&vm->freed);
 
-        /* create scheduler entity for page table updates */
-        r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
+        /* create scheduler entities for page table updates */
+        r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
                                   adev->vm_manager.vm_pte_num_rqs, NULL);
         if (r)
                 return r;
 
+        r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
+                                  adev->vm_manager.vm_pte_num_rqs, NULL);
+        if (r)
+                goto error_free_direct;
+
         vm->pte_support_ats = false;
 
         if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
@@ -2705,7 +2710,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                 bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
         r = amdgpu_bo_create(adev, &bp, &root);
         if (r)
-                goto error_free_sched_entity;
+                goto error_free_delayed;
 
         r = amdgpu_bo_reserve(root, true);
         if (r)
@@ -2748,8 +2753,11 @@ error_free_root:
         amdgpu_bo_unref(&vm->root.base.bo);
         vm->root.base.bo = NULL;
 
-error_free_sched_entity:
-        drm_sched_entity_destroy(&vm->entity);
+error_free_delayed:
+        drm_sched_entity_destroy(&vm->delayed);
+
+error_free_direct:
+        drm_sched_entity_destroy(&vm->direct);
 
         return r;
 }
@@ -2938,7 +2946,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
         }
 
-        drm_sched_entity_destroy(&vm->entity);
+        drm_sched_entity_destroy(&vm->direct);
+        drm_sched_entity_destroy(&vm->delayed);
 
         if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
                 dev_err(adev->dev, "still active bo inside vm\n");
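
Condensed, the setup and error-unwind order in amdgpu_vm_init() after this patch is roughly the following (a simplified sketch of the hunks above, with unrelated setup and the intermediate error_free_root label elided):

r = drm_sched_entity_init(&vm->direct, ...);
if (r)
        return r;                       /* nothing to clean up yet */

r = drm_sched_entity_init(&vm->delayed, ...);
if (r)
        goto error_free_direct;

r = amdgpu_bo_create(adev, &bp, &root);
if (r)
        goto error_free_delayed;
...
error_free_delayed:
        drm_sched_entity_destroy(&vm->delayed);

error_free_direct:
        drm_sched_entity_destroy(&vm->direct);

        return r;

The labels tear the entities down in the reverse order of their creation, so a failure while only the direct entity exists skips the delayed teardown.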

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

@@ -257,8 +257,9 @@ struct amdgpu_vm {
         struct amdgpu_vm_pt     root;
         struct dma_fence        *last_update;
 
-        /* Scheduler entity for page table updates */
-        struct drm_sched_entity entity;
+        /* Scheduler entities for page table updates */
+        struct drm_sched_entity direct;
+        struct drm_sched_entity delayed;
 
         unsigned int            pasid;
         /* dedicated to vm */

drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c

@@ -99,12 +99,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
         struct dma_fence *f;
         int r;
 
-        ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
+        ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
+                            sched);
 
         WARN_ON(ib->length_dw == 0);
         amdgpu_ring_pad_ib(ring, ib);
         WARN_ON(ib->length_dw > p->num_dw_left);
-        r = amdgpu_job_submit(p->job, &p->vm->entity,
+        r = amdgpu_job_submit(p->job, &p->vm->delayed,
                               AMDGPU_FENCE_OWNER_VM, &f);
         if (r)
                 goto error;
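
Note that after this patch amdgpu_vm_sdma_commit() still submits every update through vm->delayed; only the second entity and its setup/teardown exist so far. A follow-up change would presumably let the update parameters choose the entity, along the lines of the sketch below (the "direct" flag in amdgpu_vm_update_params is not part of this patch and is only an assumption for illustration):

/* Hypothetical follow-up: pick the entity from a flag in the update params. */
struct drm_sched_entity *entity = p->direct ? &p->vm->direct : &p->vm->delayed;

ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
...
r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);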