drm/msm/gpu: Drop duplicate fence counter

The ring seqno counter duplicates the fence-context last_fence counter.
They end up getting incremented in lock-step, on the same scheduler
thread, but the split just makes things less obvious.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Link: https://lore.kernel.org/r/20220411215849.297838-3-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
Author: Rob Clark <robdclark@chromium.org>, 2022-04-11 14:58:31 -07:00
commit f9d5355fa5 (parent 695383a138)
6 changed files with 9 additions and 10 deletions
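
For context on why the counter is redundant, here is a minimal sketch of the relationship between the two values (illustrative only; the struct and helper below are simplified stand-ins, not the driver's msm_fence code): each submission's hw_fence takes its seqno from the ring's fence context, which hands out values from a single monotonic counter and records the most recent one in last_fence.

#include <stdint.h>

/*
 * Illustrative sketch, not the driver's implementation: the fence
 * context owns one monotonically increasing counter.  Every hw_fence
 * created for a submission on the ring takes its seqno from this
 * counter, and last_fence remembers the most recently issued value.
 */
struct fence_context_sketch {
	uint32_t last_fence;	/* seqno of the last fence handed out */
};

static uint32_t sketch_alloc_seqno(struct fence_context_sketch *fctx)
{
	return ++fctx->last_fence;
}

/*
 * Since submit->hw_fence->seqno comes from the same increment, a
 * separate ring->seqno counter always ends up equal to
 * fctx->last_fence -- which is what the diff below removes.
 */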

drivers/gpu/drm/msm/adreno/a5xx_gpu.c

@@ -1235,7 +1235,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
 		return;
 
 	DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
-		ring ? ring->id : -1, ring ? ring->seqno : 0,
+		ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
 		gpu_read(gpu, REG_A5XX_RBBM_STATUS),
 		gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
 		gpu_read(gpu, REG_A5XX_CP_RB_WPTR),

drivers/gpu/drm/msm/adreno/a6xx_gpu.c

@@ -1390,7 +1390,7 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
 
 	DRM_DEV_ERROR(&gpu->pdev->dev,
 		"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
-		ring ? ring->id : -1, ring ? ring->seqno : 0,
+		ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
 		gpu_read(gpu, REG_A6XX_RBBM_STATUS),
 		gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
 		gpu_read(gpu, REG_A6XX_CP_RB_WPTR),

drivers/gpu/drm/msm/adreno/adreno_gpu.c

@@ -578,7 +578,7 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
 
 		state->ring[i].fence = gpu->rb[i]->memptrs->fence;
 		state->ring[i].iova = gpu->rb[i]->iova;
-		state->ring[i].seqno = gpu->rb[i]->seqno;
+		state->ring[i].seqno = gpu->rb[i]->fctx->last_fence;
 		state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
 		state->ring[i].wptr = get_wptr(gpu->rb[i]);
 
@@ -828,7 +828,7 @@ void adreno_dump_info(struct msm_gpu *gpu)
 
 		printk("rb %d: fence: %d/%d\n", i,
 			ring->memptrs->fence,
-			ring->seqno);
+			ring->fctx->last_fence);
 
 		printk("rptr: %d\n", get_rptr(adreno_gpu, ring));
 		printk("rb wptr: %d\n", get_wptr(ring));

drivers/gpu/drm/msm/msm_gpu.c

@@ -523,7 +523,7 @@ static void hangcheck_handler(struct timer_list *t)
 	if (fence != ring->hangcheck_fence) {
 		/* some progress has been made.. ya! */
 		ring->hangcheck_fence = fence;
-	} else if (fence_before(fence, ring->seqno)) {
+	} else if (fence_before(fence, ring->fctx->last_fence)) {
 		/* no progress and not done.. hung! */
 		ring->hangcheck_fence = fence;
 		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
@@ -531,13 +531,13 @@ static void hangcheck_handler(struct timer_list *t)
 		DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
 				gpu->name, fence);
 		DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
-				gpu->name, ring->seqno);
+				gpu->name, ring->fctx->last_fence);
 
 		kthread_queue_work(gpu->worker, &gpu->recover_work);
 	}
 
 	/* if still more pending work, reset the hangcheck timer: */
-	if (fence_after(ring->seqno, ring->hangcheck_fence))
+	if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
 		hangcheck_timer_reset(gpu);
 
 	/* workaround for missing irq: */
@@ -754,7 +754,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 
 	msm_gpu_hw_init(gpu);
 
-	submit->seqno = ++ring->seqno;
+	submit->seqno = submit->hw_fence->seqno;
 
 	msm_rd_dump_submit(priv->rd, submit, NULL);
 

drivers/gpu/drm/msm/msm_gpu.h

@@ -291,7 +291,7 @@ static inline bool msm_gpu_active(struct msm_gpu *gpu)
 	for (i = 0; i < gpu->nr_rings; i++) {
 		struct msm_ringbuffer *ring = gpu->rb[i];
 
-		if (fence_after(ring->seqno, ring->memptrs->fence))
+		if (fence_after(ring->fctx->last_fence, ring->memptrs->fence))
 			return true;
 	}
 

drivers/gpu/drm/msm/msm_ringbuffer.h

@@ -59,7 +59,6 @@ struct msm_ringbuffer {
 	spinlock_t submit_lock;
 
 	uint64_t iova;
-	uint32_t seqno;
 	uint32_t hangcheck_fence;
 	struct msm_rbmemptrs *memptrs;
 	uint64_t memptrs_iova;