drm/i915/gt: Replace intel_engine_transfer_stale_breadcrumbs

After staring at the breadcrumb enabling/cancellation and coming to the
conclusion that the cause of the mysterious stale breadcrumbs must be the
act of submitting already-completed requests, we can then redirect those
completed requests onto a dedicated signaled_list at the time of
construction and so eliminate intel_engine_transfer_stale_breadcrumbs().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200731154834.8378-2-chris@chris-wilson.co.uk
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
This commit is contained in:
Chris Wilson 2020-07-31 16:48:32 +01:00 committed by Joonas Lahtinen
parent c18636f763
commit 2854d86632
4 changed files with 21 additions and 52 deletions

View file

@@ -142,16 +142,16 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
intel_engine_add_retire(engine, tl);
}
static void __signal_request(struct i915_request *rq, struct list_head *signals)
static bool __signal_request(struct i915_request *rq, struct list_head *signals)
{
GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
if (!__dma_fence_signal(&rq->fence))
return;
return false;
i915_request_get(rq);
list_add_tail(&rq->signal_link, signals);
return true;
}
static void signal_irq_work(struct irq_work *work)
@@ -278,32 +278,6 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
spin_unlock_irqrestore(&b->irq_lock, flags);
}
void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine,
struct intel_context *ce)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
unsigned long flags;
spin_lock_irqsave(&b->irq_lock, flags);
if (!list_empty(&ce->signals)) {
struct i915_request *rq, *next;
/* Queue for executing the signal callbacks in the irq_work */
list_for_each_entry_safe(rq, next, &ce->signals, signal_link) {
GEM_BUG_ON(rq->engine != engine);
GEM_BUG_ON(!__request_completed(rq));
__signal_request(rq, &b->signaled_requests);
}
INIT_LIST_HEAD(&ce->signals);
list_del_init(&ce->signal_link);
irq_work_queue(&b->irq_work);
}
spin_unlock_irqrestore(&b->irq_lock, flags);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
}
@@ -317,6 +291,17 @@ static void insert_breadcrumb(struct i915_request *rq,
if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
return;
/*
* If the request is already completed, we can transfer it
* straight onto a signaled list, and queue the irq worker for
* its signal completion.
*/
if (__request_completed(rq)) {
if (__signal_request(rq, &b->signaled_requests))
irq_work_queue(&b->irq_work);
return;
}
__intel_breadcrumbs_arm_irq(b);
/*
@@ -344,8 +329,11 @@ static void insert_breadcrumb(struct i915_request *rq,
if (pos == &ce->signals) /* catch transitions from empty list */
list_move_tail(&ce->signal_link, &b->signalers);
GEM_BUG_ON(!check_signal_order(ce, rq));
set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
/* Check after attaching to irq, interrupt may have already fired. */
if (__request_completed(rq))
irq_work_queue(&b->irq_work);
}
bool i915_request_enable_breadcrumb(struct i915_request *rq)
@@ -401,7 +389,7 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
spin_unlock(&b->irq_lock);
return !__request_completed(rq);
return true;
}
void i915_request_cancel_breadcrumb(struct i915_request *rq)

View file

@@ -237,9 +237,6 @@ intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine,
struct intel_context *ce);
void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
struct drm_printer *p);

View file

@@ -1805,18 +1805,6 @@ static bool virtual_matches(const struct virtual_engine *ve,
return true;
}
static void virtual_xfer_breadcrumbs(struct virtual_engine *ve)
{
/*
* All the outstanding signals on ve->siblings[0] must have
* been completed, just pending the interrupt handler. As those
* signals still refer to the old sibling (via rq->engine), we must
* transfer those to the old irq_worker to keep our locking
* consistent.
*/
intel_engine_transfer_stale_breadcrumbs(ve->siblings[0], &ve->context);
}
#define for_each_waiter(p__, rq__) \
list_for_each_entry_lockless(p__, \
&(rq__)->sched.waiters_list, \
@@ -2267,9 +2255,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
virtual_update_register_offsets(regs,
engine);
if (!list_empty(&ve->context.signals))
virtual_xfer_breadcrumbs(ve);
/*
* Move the bound engine to the top of the list
* for future execution. We then kick this

View file

@@ -569,9 +569,8 @@ bool __i915_request_submit(struct i915_request *request)
*/
__notify_execute_cb(request);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
!i915_request_enable_breadcrumb(request))
intel_engine_signal_breadcrumbs(engine);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
i915_request_enable_breadcrumb(request);
return result;
}