drm/scheduler: modify API to avoid redundancy

Each entity already carries a pointer to its scheduler, so the functions
that take an entity do not need a separate sched argument. Drop the
redundant parameter.

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Author: Nayan Deshmukh, 2018-07-20 17:51:05 +05:30
Committed by: Alex Deucher
parent bf314ca3f1
commit cdc5017659
13 changed files with 30 additions and 42 deletions
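
Every hunk below makes the same mechanical change: the caller stops passing a
scheduler that the entity already knows. A before/after sketch using the amdgpu
call from the first hunk (no new code, just the shape of the change):

	/* before: the caller names the scheduler explicitly */
	r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);

	/* after: drm_sched_job_init() reads it from entity->sched */
	r = drm_sched_job_init(&job->base, entity, p->filp);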

drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -1232,7 +1232,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	job = p->job;
 	p->job = NULL;
 
-	r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+	r = drm_sched_job_init(&job->base, entity, p->filp);
 	if (r) {
 		amdgpu_job_free(job);
 		amdgpu_mn_unlock(p->mn);

drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

@@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 failed:
 	for (j = 0; j < i; j++)
-		drm_sched_entity_destroy(&adev->rings[j]->sched,
-					 &ctx->rings[j].entity);
+		drm_sched_entity_destroy(&ctx->rings[j].entity);
 	kfree(ctx->fences);
 	ctx->fences = NULL;
 
 	return r;
@@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
 		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 			continue;
 
-		drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
-					 &ctx->rings[i].entity);
+		drm_sched_entity_destroy(&ctx->rings[i].entity);
 	}
 
 	amdgpu_ctx_fini(ref);
@@ -466,8 +464,8 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
 			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 				continue;
 
-			max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
-						  &ctx->rings[i].entity, max_wait);
+			max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
+							  max_wait);
 		}
 	}
 	mutex_unlock(&mgr->lock);
@@ -492,8 +490,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 			continue;
 
 		if (kref_read(&ctx->refcount) == 1)
-			drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
-					      &ctx->rings[i].entity);
+			drm_sched_entity_fini(&ctx->rings[i].entity);
 		else
 			DRM_ERROR("ctx %p is still alive\n", ctx);
 	}

drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

@@ -133,7 +133,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 	if (!f)
 		return -EINVAL;
 
-	r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
+	r = drm_sched_job_init(&job->base, entity, owner);
 	if (r)
 		return r;

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -1925,8 +1925,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 			return;
 		}
 	} else {
-		drm_sched_entity_destroy(adev->mman.entity.sched,
-					 &adev->mman.entity);
+		drm_sched_entity_destroy(&adev->mman.entity);
 		dma_fence_put(man->move);
 		man->move = NULL;
 	}

drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -305,8 +305,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
 	int i, j;
 
-	drm_sched_entity_destroy(&adev->uvd.inst->ring.sched,
-				 &adev->uvd.entity);
+	drm_sched_entity_destroy(&adev->uvd.entity);
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		kfree(adev->uvd.inst[j].saved_bo);

drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c

@@ -221,7 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 	if (adev->vce.vcpu_bo == NULL)
 		return 0;
 
-	drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);
+	drm_sched_entity_destroy(&adev->vce.entity);
 
 	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
 			      (void **)&adev->vce.cpu_addr);

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -2642,7 +2642,7 @@ error_free_root:
 	vm->root.base.bo = NULL;
 
 error_free_sched_entity:
-	drm_sched_entity_destroy(&ring->sched, &vm->entity);
+	drm_sched_entity_destroy(&vm->entity);
 
 	return r;
 }
@@ -2779,7 +2779,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 	}
 
-	drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
+	drm_sched_entity_destroy(&vm->entity);
 
 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
 		dev_err(adev->dev, "still active bo inside vm\n");

drivers/gpu/drm/etnaviv/etnaviv_drv.c

@@ -78,8 +78,7 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
 				gpu->lastctx = NULL;
 			mutex_unlock(&gpu->lock);
 
-			drm_sched_entity_destroy(&gpu->sched,
-				&ctx->sched_entity[i]);
+			drm_sched_entity_destroy(&ctx->sched_entity[i]);
 		}
 	}

drivers/gpu/drm/etnaviv/etnaviv_sched.c

@@ -118,8 +118,8 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
 {
 	int ret;
 
-	ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
-				 sched_entity, submit->cmdbuf.ctx);
+	ret = drm_sched_job_init(&submit->sched_job, sched_entity,
+				 submit->cmdbuf.ctx);
 	if (ret)
 		return ret;

drivers/gpu/drm/scheduler/gpu_scheduler.c

@@ -273,11 +273,12 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
  *
  * Returns the remaining time in jiffies left from the input timeout
  */
-long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
-			    struct drm_sched_entity *entity, long timeout)
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 {
+	struct drm_gpu_scheduler *sched;
 	long ret = timeout;
 
+	sched = entity->sched;
 	if (!drm_sched_entity_is_initialized(sched, entity))
 		return ret;
 
 	/**
@@ -312,10 +313,11 @@ EXPORT_SYMBOL(drm_sched_entity_flush);
  * entity and signals all jobs with an error code if the process was killed.
  *
  */
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity)
+void drm_sched_entity_fini(struct drm_sched_entity *entity)
 {
+	struct drm_gpu_scheduler *sched;
 
+	sched = entity->sched;
 	drm_sched_entity_set_rq(entity, NULL);
 
 	/* Consumption of existing IBs wasn't completed. Forcefully
@@ -373,11 +375,10 @@ EXPORT_SYMBOL(drm_sched_entity_fini);
  *
  * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
  */
-void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
-			      struct drm_sched_entity *entity)
+void drm_sched_entity_destroy(struct drm_sched_entity *entity)
 {
-	drm_sched_entity_flush(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
-	drm_sched_entity_fini(sched, entity);
+	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+	drm_sched_entity_fini(entity);
 }
 
 EXPORT_SYMBOL(drm_sched_entity_destroy);
@@ -740,10 +741,11 @@ EXPORT_SYMBOL(drm_sched_job_recovery);
  * Returns 0 for success, negative error code otherwise.
  */
 int drm_sched_job_init(struct drm_sched_job *job,
-		       struct drm_gpu_scheduler *sched,
 		       struct drm_sched_entity *entity,
 		       void *owner)
 {
+	struct drm_gpu_scheduler *sched = entity->sched;
+
 	job->sched = sched;
 	job->entity = entity;
 	job->s_priority = entity->rq - sched->sched_rq;

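The heart of the patch is the scheduler core above: each exported function now
derives the scheduler from the entity, so a caller can no longer pass a
mismatched pair. Condensed from the hunks, the pattern is (entity->sched is
filled in when the entity is initialized, which is what makes the internal
lookup safe):

	void drm_sched_entity_destroy(struct drm_sched_entity *entity)
	{
		/* both helpers now resolve entity->sched themselves */
		drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
		drm_sched_entity_fini(entity);
	}
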
drivers/gpu/drm/v3d/v3d_drv.c

@@ -145,13 +145,11 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 static void
 v3d_postclose(struct drm_device *dev, struct drm_file *file)
 {
-	struct v3d_dev *v3d = to_v3d_dev(dev);
 	struct v3d_file_priv *v3d_priv = file->driver_priv;
 	enum v3d_queue q;
 
 	for (q = 0; q < V3D_MAX_QUEUES; q++) {
-		drm_sched_entity_destroy(&v3d->queue[q].sched,
-					 &v3d_priv->sched_entity[q]);
+		drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
 	}
 
 	kfree(v3d_priv);

drivers/gpu/drm/v3d/v3d_gem.c

@@ -553,7 +553,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	mutex_lock(&v3d->sched_lock);
 	if (exec->bin.start != exec->bin.end) {
 		ret = drm_sched_job_init(&exec->bin.base,
-					 &v3d->queue[V3D_BIN].sched,
 					 &v3d_priv->sched_entity[V3D_BIN],
 					 v3d_priv);
 		if (ret)
@@ -568,7 +567,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	}
 
 	ret = drm_sched_job_init(&exec->render.base,
-				 &v3d->queue[V3D_RENDER].sched,
 				 &v3d_priv->sched_entity[V3D_RENDER],
 				 v3d_priv);
 	if (ret)

include/drm/gpu_scheduler.h

@@ -286,12 +286,9 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 			  struct drm_sched_rq **rq_list,
 			  unsigned int num_rq_list,
 			  atomic_t *guilty);
-long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
-			    struct drm_sched_entity *entity, long timeout);
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity);
-void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
-			      struct drm_sched_entity *entity);
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
+void drm_sched_entity_fini(struct drm_sched_entity *entity);
+void drm_sched_entity_destroy(struct drm_sched_entity *entity);
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 			       struct drm_sched_entity *entity);
 void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
@@ -302,7 +299,6 @@ struct drm_sched_fence *drm_sched_fence_create(
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
 void drm_sched_fence_finished(struct drm_sched_fence *fence);
 int drm_sched_job_init(struct drm_sched_job *job,
-		       struct drm_gpu_scheduler *sched,
 		       struct drm_sched_entity *entity,
 		       void *owner);
 void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
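
With the header in this final shape, driver teardown needs only the entity. A
sketch of a hypothetical driver's postclose path against the new API (the
mydrv_* names are invented for illustration, not part of the patch):

	static void mydrv_postclose(struct drm_device *dev, struct drm_file *file)
	{
		struct mydrv_file_priv *priv = file->driver_priv;

		/* flushes outstanding jobs, then finalizes the entity;
		 * no scheduler pointer has to be threaded through. */
		drm_sched_entity_destroy(&priv->sched_entity);
		kfree(priv);
	}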