mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-29 13:53:33 +00:00
drm/msm: Drop update_fences()
I noticed while looking at some traces, that we could miss calls to
msm_update_fence(), as the irq could have raced with retire_submits()
which could have already popped the last submit on a ring out of the
queue of in-flight submits. But walking the list of submits in the
irq handler isn't really needed, as dma_fence_is_signaled() will do the right thing.
So let's just drop it entirely.
v2: use spin_lock_irqsave/restore as we are no longer protected by the
spin_lock_irqsave/restore() in update_fences()
Reported-by: Steev Klimaszewski <steev@kali.org>
Fixes: 95d1deb02a ("drm/msm/gem: Add fenced vma unpin")
Signed-off-by: Rob Clark <robdclark@chromium.org>
Tested-by: Steev Klimaszewski <steev@kali.org>
Patchwork: https://patchwork.freedesktop.org/patch/490136/
Link: https://lore.kernel.org/r/20220618161120.3451993-1-robdclark@gmail.com
This commit is contained in:
parent
b4d329c451
commit
3c7a52217a
2 changed files with 7 additions and 23 deletions
|
@ -46,12 +46,14 @@ bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence)
|
|||
(int32_t)(*fctx->fenceptr - fence) >= 0;
|
||||
}
|
||||
|
||||
/* called from workqueue */
|
||||
/* called from irq handler and workqueue (in recover path) */
|
||||
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
|
||||
{
|
||||
spin_lock(&fctx->spinlock);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&fctx->spinlock, flags);
|
||||
fctx->completed_fence = max(fence, fctx->completed_fence);
|
||||
spin_unlock(&fctx->spinlock);
|
||||
spin_unlock_irqrestore(&fctx->spinlock, flags);
|
||||
}
|
||||
|
||||
struct msm_fence {
|
||||
|
|
|
@ -164,24 +164,6 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
|
||||
uint32_t fence)
|
||||
{
|
||||
struct msm_gem_submit *submit;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&ring->submit_lock, flags);
|
||||
list_for_each_entry(submit, &ring->submits, node) {
|
||||
if (fence_after(submit->seqno, fence))
|
||||
break;
|
||||
|
||||
msm_update_fence(submit->ring->fctx,
|
||||
submit->hw_fence->seqno);
|
||||
dma_fence_signal(submit->hw_fence);
|
||||
}
|
||||
spin_unlock_irqrestore(&ring->submit_lock, flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEV_COREDUMP
|
||||
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
|
||||
size_t count, void *data, size_t datalen)
|
||||
|
@ -438,7 +420,7 @@ static void recover_worker(struct kthread_work *work)
|
|||
if (ring == cur_ring)
|
||||
fence++;
|
||||
|
||||
update_fences(gpu, ring, fence);
|
||||
msm_update_fence(ring->fctx, fence);
|
||||
}
|
||||
|
||||
if (msm_gpu_active(gpu)) {
|
||||
|
@ -736,7 +718,7 @@ void msm_gpu_retire(struct msm_gpu *gpu)
|
|||
int i;
|
||||
|
||||
for (i = 0; i < gpu->nr_rings; i++)
|
||||
update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
|
||||
msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);
|
||||
|
||||
kthread_queue_work(gpu->worker, &gpu->retire_work);
|
||||
update_sw_cntrs(gpu);
|
||||
|
|
Loading…
Reference in a new issue