drm/msm: drop drm_gem_object_put_locked()

No idea why we were still using this. It certainly hasn't been needed
for some time. So drop the pointless twin codepaths.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://lore.kernel.org/r/20210728010632.2633470-4-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
This commit is contained in:
parent
86c2a0f000
commit
030af2b05a
|
@ -117,13 +117,13 @@ reset_set(void *data, u64 val)
|
|||
|
||||
if (a5xx_gpu->pm4_bo) {
|
||||
msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
|
||||
drm_gem_object_put_locked(a5xx_gpu->pm4_bo);
|
||||
drm_gem_object_put(a5xx_gpu->pm4_bo);
|
||||
a5xx_gpu->pm4_bo = NULL;
|
||||
}
|
||||
|
||||
if (a5xx_gpu->pfp_bo) {
|
||||
msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
|
||||
drm_gem_object_put_locked(a5xx_gpu->pfp_bo);
|
||||
drm_gem_object_put(a5xx_gpu->pfp_bo);
|
||||
a5xx_gpu->pfp_bo = NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -1415,7 +1415,7 @@ struct a5xx_gpu_state {
|
|||
static int a5xx_crashdumper_init(struct msm_gpu *gpu,
|
||||
struct a5xx_crashdumper *dumper)
|
||||
{
|
||||
dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
|
||||
dumper->ptr = msm_gem_kernel_new(gpu->dev,
|
||||
SZ_1M, MSM_BO_WC, gpu->aspace,
|
||||
&dumper->bo, &dumper->iova);
|
||||
|
||||
|
@ -1517,7 +1517,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
|
|||
|
||||
if (a5xx_crashdumper_run(gpu, &dumper)) {
|
||||
kfree(a5xx_state->hlsqregs);
|
||||
msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
|
||||
msm_gem_kernel_put(dumper.bo, gpu->aspace);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1525,7 +1525,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
|
|||
memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
|
||||
count * sizeof(u32));
|
||||
|
||||
msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
|
||||
msm_gem_kernel_put(dumper.bo, gpu->aspace);
|
||||
}
|
||||
|
||||
static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
|
||||
|
|
|
@ -362,7 +362,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
|
|||
*/
|
||||
bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
|
||||
|
||||
ptr = msm_gem_kernel_new_locked(drm, bosize,
|
||||
ptr = msm_gem_kernel_new(drm, bosize,
|
||||
MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace,
|
||||
&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
|
||||
if (IS_ERR(ptr))
|
||||
|
|
|
@ -240,7 +240,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
|
|||
A5XX_PREEMPT_COUNTER_SIZE,
|
||||
MSM_BO_WC, gpu->aspace, &counters_bo, &counters_iova);
|
||||
if (IS_ERR(counters)) {
|
||||
msm_gem_kernel_put(bo, gpu->aspace, true);
|
||||
msm_gem_kernel_put(bo, gpu->aspace);
|
||||
return PTR_ERR(counters);
|
||||
}
|
||||
|
||||
|
@ -272,9 +272,8 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
|
|||
int i;
|
||||
|
||||
for (i = 0; i < gpu->nr_rings; i++) {
|
||||
msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true);
|
||||
msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i],
|
||||
gpu->aspace, true);
|
||||
msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace);
|
||||
msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->aspace);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1129,12 +1129,12 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
|
|||
|
||||
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
|
||||
{
|
||||
msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false);
|
||||
msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false);
|
||||
msm_gem_kernel_put(gmu->icache.obj, gmu->aspace, false);
|
||||
msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace, false);
|
||||
msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace, false);
|
||||
msm_gem_kernel_put(gmu->log.obj, gmu->aspace, false);
|
||||
msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
|
||||
msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
|
||||
msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
|
||||
msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
|
||||
msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
|
||||
msm_gem_kernel_put(gmu->log.obj, gmu->aspace);
|
||||
|
||||
gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
|
||||
msm_gem_address_space_put(gmu->aspace);
|
||||
|
|
|
@ -1035,7 +1035,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
|
|||
|
||||
if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) {
|
||||
if (!a6xx_gpu->shadow_bo) {
|
||||
a6xx_gpu->shadow = msm_gem_kernel_new_locked(gpu->dev,
|
||||
a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
|
||||
sizeof(u32) * gpu->nr_rings,
|
||||
MSM_BO_WC | MSM_BO_MAP_PRIV,
|
||||
gpu->aspace, &a6xx_gpu->shadow_bo,
|
||||
|
|
|
@ -112,7 +112,7 @@ static void *state_kmemdup(struct a6xx_gpu_state *a6xx_state, void *src,
|
|||
static int a6xx_crashdumper_init(struct msm_gpu *gpu,
|
||||
struct a6xx_crashdumper *dumper)
|
||||
{
|
||||
dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
|
||||
dumper->ptr = msm_gem_kernel_new(gpu->dev,
|
||||
SZ_1M, MSM_BO_WC, gpu->aspace,
|
||||
&dumper->bo, &dumper->iova);
|
||||
|
||||
|
@ -961,7 +961,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
|
|||
a6xx_get_clusters(gpu, a6xx_state, dumper);
|
||||
a6xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
|
||||
|
||||
msm_gem_kernel_put(dumper->bo, gpu->aspace, true);
|
||||
msm_gem_kernel_put(dumper->bo, gpu->aspace);
|
||||
}
|
||||
|
||||
if (snapshot_debugbus)
|
||||
|
|
|
@ -390,7 +390,7 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
|
|||
struct drm_gem_object *bo;
|
||||
void *ptr;
|
||||
|
||||
ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
|
||||
ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
|
||||
MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
|
||||
|
||||
if (IS_ERR(ptr))
|
||||
|
|
|
@ -1062,7 +1062,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
|
|||
}
|
||||
#endif
|
||||
|
||||
/* don't call directly! Use drm_gem_object_put_locked() and friends */
|
||||
/* don't call directly! Use drm_gem_object_put() */
|
||||
void msm_gem_free_object(struct drm_gem_object *obj)
|
||||
{
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
|
@ -1192,8 +1192,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
|
||||
uint32_t size, uint32_t flags, bool struct_mutex_locked)
|
||||
struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
|
||||
{
|
||||
struct msm_drm_private *priv = dev->dev_private;
|
||||
struct msm_gem_object *msm_obj;
|
||||
|
@ -1280,26 +1279,10 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
|
|||
return obj;
|
||||
|
||||
fail:
|
||||
if (struct_mutex_locked) {
|
||||
drm_gem_object_put_locked(obj);
|
||||
} else {
|
||||
drm_gem_object_put(obj);
|
||||
}
|
||||
drm_gem_object_put(obj);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
|
||||
uint32_t size, uint32_t flags)
|
||||
{
|
||||
return _msm_gem_new(dev, size, flags, true);
|
||||
}
|
||||
|
||||
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
|
||||
uint32_t size, uint32_t flags)
|
||||
{
|
||||
return _msm_gem_new(dev, size, flags, false);
|
||||
}
|
||||
|
||||
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
|
||||
struct dma_buf *dmabuf, struct sg_table *sgt)
|
||||
{
|
||||
|
@ -1358,12 +1341,12 @@ fail:
|
|||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
|
||||
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
|
||||
uint32_t flags, struct msm_gem_address_space *aspace,
|
||||
struct drm_gem_object **bo, uint64_t *iova, bool locked)
|
||||
struct drm_gem_object **bo, uint64_t *iova)
|
||||
{
|
||||
void *vaddr;
|
||||
struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
|
||||
struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
|
||||
int ret;
|
||||
|
||||
if (IS_ERR(obj))
|
||||
|
@ -1387,42 +1370,21 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
|
|||
|
||||
return vaddr;
|
||||
err:
|
||||
if (locked)
|
||||
drm_gem_object_put_locked(obj);
|
||||
else
|
||||
drm_gem_object_put(obj);
|
||||
drm_gem_object_put(obj);
|
||||
|
||||
return ERR_PTR(ret);
|
||||
|
||||
}
|
||||
|
||||
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
|
||||
uint32_t flags, struct msm_gem_address_space *aspace,
|
||||
struct drm_gem_object **bo, uint64_t *iova)
|
||||
{
|
||||
return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
|
||||
}
|
||||
|
||||
void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
|
||||
uint32_t flags, struct msm_gem_address_space *aspace,
|
||||
struct drm_gem_object **bo, uint64_t *iova)
|
||||
{
|
||||
return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
|
||||
}
|
||||
|
||||
void msm_gem_kernel_put(struct drm_gem_object *bo,
|
||||
struct msm_gem_address_space *aspace, bool locked)
|
||||
struct msm_gem_address_space *aspace)
|
||||
{
|
||||
if (IS_ERR_OR_NULL(bo))
|
||||
return;
|
||||
|
||||
msm_gem_put_vaddr(bo);
|
||||
msm_gem_unpin_iova(bo, aspace);
|
||||
|
||||
if (locked)
|
||||
drm_gem_object_put_locked(bo);
|
||||
else
|
||||
drm_gem_object_put(bo);
|
||||
drm_gem_object_put(bo);
|
||||
}
|
||||
|
||||
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
|
||||
|
|
|
@ -154,16 +154,11 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
|
|||
uint32_t size, uint32_t flags, uint32_t *handle, char *name);
|
||||
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
|
||||
uint32_t size, uint32_t flags);
|
||||
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
|
||||
uint32_t size, uint32_t flags);
|
||||
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
|
||||
uint32_t flags, struct msm_gem_address_space *aspace,
|
||||
struct drm_gem_object **bo, uint64_t *iova);
|
||||
void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
|
||||
uint32_t flags, struct msm_gem_address_space *aspace,
|
||||
struct drm_gem_object **bo, uint64_t *iova);
|
||||
void msm_gem_kernel_put(struct drm_gem_object *bo,
|
||||
struct msm_gem_address_space *aspace, bool locked);
|
||||
struct msm_gem_address_space *aspace);
|
||||
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
|
||||
struct dma_buf *dmabuf, struct sg_table *sgt);
|
||||
__printf(2, 3)
|
||||
|
|
|
@ -452,7 +452,7 @@ static void submit_cleanup(struct msm_gem_submit *submit)
|
|||
struct msm_gem_object *msm_obj = submit->bos[i].obj;
|
||||
submit_unlock_unpin_bo(submit, i, false);
|
||||
list_del_init(&msm_obj->submit_entry);
|
||||
drm_gem_object_put_locked(&msm_obj->base);
|
||||
drm_gem_object_put(&msm_obj->base);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -992,7 +992,7 @@ fail:
|
|||
gpu->rb[i] = NULL;
|
||||
}
|
||||
|
||||
msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
|
||||
msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
|
||||
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
return ret;
|
||||
|
@ -1011,7 +1011,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
|
|||
gpu->rb[i] = NULL;
|
||||
}
|
||||
|
||||
msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
|
||||
msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
|
||||
|
||||
if (!IS_ERR_OR_NULL(gpu->aspace)) {
|
||||
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
|
||||
|
|
|
@ -67,7 +67,7 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
|
|||
|
||||
msm_fence_context_free(ring->fctx);
|
||||
|
||||
msm_gem_kernel_put(ring->bo, ring->gpu->aspace, false);
|
||||
msm_gem_kernel_put(ring->bo, ring->gpu->aspace);
|
||||
|
||||
kfree(ring);
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue