drm/amdgpu: move sync into job object

No need to keep a separate sync object for every IB.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit e86f9ceee1 (parent 9f2ade33e6)
Author: Christian König, 2016-02-08 12:13:05 +01:00 (committed by Alex Deucher)
13 changed files with 46 additions and 38 deletions
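The change in one line: struct amdgpu_ib loses its per-IB amdgpu_sync and struct amdgpu_job gains one, so dependencies are tracked once per job rather than once per IB (only ibs[0]'s sync was ever used anyway, as the hunks below show). A minimal sketch of the resulting call pattern, using only functions visible in this patch; the bo variable, the err_free label and the owner choice are illustrative, not from the patch:

	struct amdgpu_job *job;
	struct fence *fence;
	int r;

	r = amdgpu_job_alloc(adev, 1, &job);	/* now also calls amdgpu_sync_create(&job->sync) */
	if (r)
		return r;

	/* dependencies accumulate on the job, not on its first IB */
	r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
			     AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r)
		goto err_free;

	r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);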

drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -776,7 +776,6 @@ struct amdgpu_ib {
 	bool grabbed_vmid;
 	struct amdgpu_vm *vm;
 	struct amdgpu_ctx *ctx;
-	struct amdgpu_sync sync;
 	uint32_t gds_base, gds_size;
 	uint32_t gws_base, gws_size;
 	uint32_t oa_base, oa_size;
@@ -1178,6 +1177,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		       struct amdgpu_ib *ib, void *owner,
+		       struct fence *last_vm_update,
 		       struct fence **f);
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
@@ -1236,6 +1236,7 @@ struct amdgpu_job {
 	struct amd_sched_job base;
 	struct amdgpu_device *adev;
 	struct amdgpu_ring *ring;
+	struct amdgpu_sync sync;
 	struct amdgpu_ib *ibs;
 	uint32_t num_ibs;
 	void *owner;

drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -411,7 +411,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 	list_for_each_entry(e, &p->validated, tv.head) {
 		struct reservation_object *resv = e->robj->tbo.resv;
-		r = amdgpu_sync_resv(p->adev, &p->job->ibs[0].sync, resv, p->filp);
+		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);
 		if (r)
 			return r;
@@ -491,7 +491,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 	if (r)
 		return r;
-	r = amdgpu_sync_fence(adev, &p->job->ibs[0].sync, vm->page_directory_fence);
+	r = amdgpu_sync_fence(adev, &p->job->sync, vm->page_directory_fence);
 	if (r)
 		return r;
@@ -517,14 +517,14 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 				return r;
 			f = bo_va->last_pt_update;
-			r = amdgpu_sync_fence(adev, &p->job->ibs[0].sync, f);
+			r = amdgpu_sync_fence(adev, &p->job->sync, f);
 			if (r)
 				return r;
 		}
 	}
-	r = amdgpu_vm_clear_invalids(adev, vm, &p->job->ibs[0].sync);
+	r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync);
 	if (amdgpu_vm_debug && p->bo_list) {
 		/* Invalidate all BOs to test for userspace bugs */
@@ -698,11 +698,8 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 				  struct amdgpu_cs_parser *p)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	struct amdgpu_ib *ib;
 	int i, j, r;
-	/* Add dependencies to first IB */
-	ib = &p->job->ibs[0];
 	for (i = 0; i < p->nchunks; ++i) {
 		struct drm_amdgpu_cs_chunk_dep *deps;
 		struct amdgpu_cs_chunk *chunk;
@@ -740,7 +737,8 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 				return r;
 			} else if (fence) {
-				r = amdgpu_sync_fence(adev, &ib->sync, fence);
+				r = amdgpu_sync_fence(adev, &p->job->sync,
+						      fence);
 				fence_put(fence);
 				amdgpu_ctx_put(ctx);
 				if (r)
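Taken together, the CS ioctl now funnels all three dependency sources into the same per-job sync object; condensed from the hunks above, with the surrounding control flow elided:

	/* implicit sync: fences already attached to each validated BO */
	r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);

	/* page-table updates this submission must run behind */
	r = amdgpu_sync_fence(adev, &p->job->sync, vm->page_directory_fence);

	/* explicit dependencies from the userspace CS chunks */
	r = amdgpu_sync_fence(adev, &p->job->sync, fence);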

drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c

@@ -74,8 +74,6 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
 	}
-	amdgpu_sync_create(&ib->sync);
 	ib->vm = vm;
 	return 0;
@@ -91,7 +89,6 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  */
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
 {
-	amdgpu_sync_free(&ib->sync);
 	amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base);
 	if (ib->fence)
 		fence_put(&ib->fence->base);
@@ -121,6 +118,7 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
  */
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		       struct amdgpu_ib *ibs, void *owner,
+		       struct fence *last_vm_update,
 		       struct fence **f)
 {
 	struct amdgpu_device *adev = ring->adev;
@@ -152,16 +150,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		return r;
 	}
-	r = amdgpu_sync_wait(&ibs->sync);
-	if (r) {
-		amdgpu_ring_undo(ring);
-		dev_err(adev->dev, "failed to sync wait (%d)\n", r);
-		return r;
-	}
 	if (vm) {
 		/* do context switch */
-		amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
+		amdgpu_vm_flush(ring, vm, last_vm_update);
 		if (ring->funcs->emit_gds_switch)
 			amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
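Net effect on the IB path (a sketch, assuming nothing outside the shown hunks changed): amdgpu_ib_schedule() no longer drains a sync object itself. Callers resolve dependencies beforehand and pass the last VM page-table update fence in explicitly, or NULL when there is none:

	int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
			       struct amdgpu_ib *ibs, void *owner,
			       struct fence *last_vm_update,	/* NULL if no VM */
			       struct fence **f);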

drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

@@ -46,6 +46,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 	(*job)->ibs = (void *)&(*job)[1];
 	(*job)->num_ibs = num_ibs;
+	amdgpu_sync_create(&(*job)->sync);
 	return 0;
 }
@@ -73,6 +75,7 @@ void amdgpu_job_free(struct amdgpu_job *job)
 		amdgpu_ib_free(job->adev, &job->ibs[i]);
 	amdgpu_bo_unref(&job->uf.bo);
+	amdgpu_sync_free(&job->sync);
 	kfree(job);
 }
@@ -99,23 +102,22 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 {
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
-	struct amdgpu_sync *sync = &job->ibs->sync;
 	struct amdgpu_vm *vm = job->ibs->vm;
-	struct fence *fence = amdgpu_sync_get_fence(sync);
+	struct fence *fence = amdgpu_sync_get_fence(&job->sync);
 	if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
 		struct amdgpu_ring *ring = job->ring;
 		int r;
-		r = amdgpu_vm_grab_id(vm, ring, sync,
+		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
 				      &job->base.s_fence->base);
 		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
 		else
 			job->ibs->grabbed_vmid = true;
-		fence = amdgpu_sync_get_fence(sync);
+		fence = amdgpu_sync_get_fence(&job->sync);
 	}
 	return fence;
@@ -132,9 +134,16 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 		return NULL;
 	}
 	job = to_amdgpu_job(sched_job);
+	r = amdgpu_sync_wait(&job->sync);
+	if (r) {
+		DRM_ERROR("failed to sync wait (%d)\n", r);
+		return NULL;
+	}
 	trace_amdgpu_sched_run_job(job);
-	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
-			       job->owner, &fence);
+	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job->owner,
+			       job->sync.last_vm_update, &fence);
 	if (r) {
 		DRM_ERROR("Error scheduling IBs (%d)\n", r);
 		goto err;
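With the sync living on the job, the two scheduler callbacks split the work cleanly; a condensed sketch of the resulting flow (error handling and the VMID-grab branch trimmed, calls otherwise as in the hunks above):

	/* the scheduler asks what to wait for: hand it job->sync one fence at a time */
	static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
	{
		struct amdgpu_job *job = to_amdgpu_job(sched_job);

		return amdgpu_sync_get_fence(&job->sync);
	}

	/* at run time, wait for whatever is still unsignaled, then submit */
	r = amdgpu_sync_wait(&job->sync);
	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job->owner,
			       job->sync.last_vm_update, &fence);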

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -1032,7 +1032,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 		return r;
 	if (resv) {
-		r = amdgpu_sync_resv(adev, &job->ibs[0].sync, resv,
+		r = amdgpu_sync_resv(adev, &job->sync, resv,
 				     AMDGPU_FENCE_OWNER_UNDEFINED);
 		if (r) {
 			DRM_ERROR("sync failed (%d).\n", r);

drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -874,7 +874,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	if (direct) {
 		r = amdgpu_ib_schedule(ring, 1, ib,
-				       AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+				       AMDGPU_FENCE_OWNER_UNDEFINED, NULL, &f);
 		if (r)
 			goto err_free;

drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c

@@ -411,7 +411,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
-	r = amdgpu_ib_schedule(ring, 1, ib, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+	r = amdgpu_ib_schedule(ring, 1, ib, AMDGPU_FENCE_OWNER_UNDEFINED,
+			       NULL, &f);
 	if (r)
 		goto err;
@@ -473,7 +474,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	if (direct) {
 		r = amdgpu_ib_schedule(ring, 1, ib,
-				       AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+				       AMDGPU_FENCE_OWNER_UNDEFINED,
+				       NULL, &f);
 		if (r)
 			goto err;

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -473,7 +473,8 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	if (ib->length_dw != 0) {
 		amdgpu_ring_pad_ib(ring, ib);
-		amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
+				 AMDGPU_FENCE_OWNER_VM);
 		WARN_ON(ib->length_dw > ndw);
 		r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
 		if (r)
@@ -714,7 +715,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	ib = &job->ibs[0];
-	r = amdgpu_sync_resv(adev, &ib->sync, vm->page_directory->tbo.resv,
+	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
 			     owner);
 	if (r)
 		goto error_free;

drivers/gpu/drm/amd/amdgpu/cik_sdma.c

@@ -633,7 +633,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[3] = 1;
 	ib.ptr[4] = 0xDEADBEEF;
 	ib.length_dw = 5;
-	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
+			       NULL, &f);
 	if (r)
 		goto err1;

drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c

@@ -2641,7 +2641,8 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
-	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
+			       NULL, &f);
 	if (r)
 		goto err2;

drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

@@ -709,7 +709,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
-	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
+			       NULL, &f);
 	if (r)
 		goto err2;
@@ -1264,7 +1265,8 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
 	/* schedule the ib on the ring */
-	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
+			       NULL, &f);
 	if (r) {
 		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
 		goto fail;

drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c

@@ -691,7 +691,8 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 	ib.length_dw = 8;
-	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
+			       NULL, &f);
 	if (r)
 		goto err1;

drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c

@@ -842,7 +842,8 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
 	ib.length_dw = 8;
-	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
+			       NULL, &f);
 	if (r)
 		goto err1;
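The remaining callers are all in-kernel IB tests and get the same mechanical update. A minimal sketch of the shared pattern (the packet contents are engine-specific and elided here): a test IB is created without a VM, so there is no page-table update to order against and last_vm_update is simply NULL:

	struct amdgpu_ib ib;
	struct fence *f = NULL;
	int r;

	r = amdgpu_ib_get(adev, NULL, 256, &ib);	/* no VM for kernel-owned IBs */
	if (r)
		return r;

	/* ... fill ib.ptr[] with the engine's test packet, set ib.length_dw ... */

	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
			       NULL, &f);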