mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-29 05:44:11 +00:00
drm/i915: Extend i915_request_await_active to use all timelines
Extend i915_request_await_active() to be able to asynchronously wait on all the tracked timelines simultaneously. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20200311092044.16353-1-chris@chris-wilson.co.uk
This commit is contained in:
parent
61f874d6e0
commit
29e6ecf3ce
3 changed files with 76 additions and 14 deletions
|
@ -518,25 +518,81 @@ int i915_active_wait(struct i915_active *ref)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
 * __await_active - apply @fn to the fence tracked by @active
 * @active: the tracked fence slot to inspect
 * @fn: callback invoked with the fence while a reference is held
 * @arg: opaque context forwarded to @fn (e.g. the awaiting request)
 *
 * Returns 0 if there is no fence to wait on or @fn succeeded,
 * otherwise the negative error code returned by @fn.
 */
static int __await_active(struct i915_active_fence *active,
			  int (*fn)(void *arg, struct dma_fence *fence),
			  void *arg)
{
	struct dma_fence *fence;

	/* Barrier nodes have no fence to chain onto. */
	if (is_barrier(active)) /* XXX flush the barrier? */
		return 0;

	/* Grab a reference so the fence cannot be retired beneath us. */
	fence = i915_active_fence_get(active);
	if (fence) {
		int err;

		err = fn(arg, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}
|
||||
|
||||
static int await_active(struct i915_active *ref,
|
||||
unsigned int flags,
|
||||
int (*fn)(void *arg, struct dma_fence *fence),
|
||||
void *arg)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
/* We must always wait for the exclusive fence! */
|
||||
if (rcu_access_pointer(ref->excl.fence)) {
|
||||
struct dma_fence *fence;
|
||||
|
||||
rcu_read_lock();
|
||||
fence = dma_fence_get_rcu_safe(&ref->excl.fence);
|
||||
rcu_read_unlock();
|
||||
if (fence) {
|
||||
err = i915_request_await_dma_fence(rq, fence);
|
||||
dma_fence_put(fence);
|
||||
}
|
||||
err = __await_active(&ref->excl, fn, arg);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
/* In the future we may choose to await on all fences */
|
||||
if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
|
||||
struct active_node *it, *n;
|
||||
|
||||
return err;
|
||||
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
|
||||
err = __await_active(&it->base, fn, arg);
|
||||
if (err)
|
||||
break;
|
||||
}
|
||||
i915_active_release(ref);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Adaptor for await_active(): @arg is the awaiting i915_request.
 * Sets up a dependency so the request waits for @fence to signal.
 */
static int rq_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_request_await_dma_fence(arg, fence);
}
|
||||
|
||||
int i915_request_await_active(struct i915_request *rq,
|
||||
struct i915_active *ref,
|
||||
unsigned int flags)
|
||||
{
|
||||
return await_active(ref, flags, rq_await_fence, rq);
|
||||
}
|
||||
|
||||
static int sw_await_fence(void *arg, struct dma_fence *fence)
|
||||
{
|
||||
return i915_sw_fence_await_dma_fence(arg, fence, 0,
|
||||
GFP_NOWAIT | __GFP_NOWARN);
|
||||
}
|
||||
|
||||
int i915_sw_fence_await_active(struct i915_sw_fence *fence,
|
||||
struct i915_active *ref,
|
||||
unsigned int flags)
|
||||
{
|
||||
return await_active(ref, flags, sw_await_fence, fence);
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
|
||||
|
|
|
@ -183,7 +183,13 @@ static inline bool i915_active_has_exclusive(struct i915_active *ref)
|
|||
|
||||
int i915_active_wait(struct i915_active *ref);
|
||||
|
||||
int i915_request_await_active(struct i915_request *rq, struct i915_active *ref);
|
||||
int i915_sw_fence_await_active(struct i915_sw_fence *fence,
|
||||
struct i915_active *ref,
|
||||
unsigned int flags);
|
||||
int i915_request_await_active(struct i915_request *rq,
|
||||
struct i915_active *ref,
|
||||
unsigned int flags);
|
||||
#define I915_ACTIVE_AWAIT_ALL BIT(0)
|
||||
|
||||
int i915_active_acquire(struct i915_active *ref);
|
||||
bool i915_active_acquire_if_busy(struct i915_active *ref);
|
||||
|
|
|
@ -1173,7 +1173,7 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
|
|||
GEM_BUG_ON(!i915_vma_is_pinned(vma));
|
||||
|
||||
/* Wait for the vma to be bound before we start! */
|
||||
err = i915_request_await_active(rq, &vma->active);
|
||||
err = i915_request_await_active(rq, &vma->active, 0);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
|
|
Loading…
Reference in a new issue