drm/i915/gem: Split the context's obj:vma lut into its own mutex

Rather than reuse the common ctx->mutex for locking the execbuffer LUT,
split it into its own lock so that it is not taken [as part of ctx->mutex]
at inappropriate times. In particular, this avoids the lock inversion that
would arise from taking the timeline->mutex for the whole execbuf
submission in the next patch.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200703004306.11117-1-chris@chris-wilson.co.uk
commit f7ce8639f6 (parent 03fca66b7a)
Author: Chris Wilson, 2020-07-03 01:43:06 +01:00
5 changed files with 26 additions and 13 deletions
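
For orientation, a minimal sketch of the resulting lock nesting
(illustrative only; the struct and function names below are hypothetical
and not taken from the driver sources): the context-close and set-vm
paths take ctx->lut_mutex nested inside ctx->mutex, while the execbuffer
lookup path takes ctx->lut_mutex on its own, so the next patch can hold
the timeline->mutex across the whole execbuf submission without ever
pulling ctx->mutex in underneath it.

#include <linux/errno.h>
#include <linux/mutex.h>

struct sketch_ctx {
	struct mutex mutex;	/* guards the wider context state */
	struct mutex lut_mutex;	/* guards only the handles_vma LUT */
};

/* Teardown path: ctx->mutex is the outer lock, lut_mutex the inner. */
static void sketch_close_lut(struct sketch_ctx *ctx)
{
	mutex_lock(&ctx->mutex);
	mutex_lock(&ctx->lut_mutex);
	/* ... walk and prune the obj:vma radix tree here ... */
	mutex_unlock(&ctx->lut_mutex);
	mutex_unlock(&ctx->mutex);
}

/* Execbuf path: only the LUT lock is needed, never ctx->mutex. */
static int sketch_add_lut(struct sketch_ctx *ctx)
{
	if (mutex_lock_interruptible(&ctx->lut_mutex))
		return -EINTR;
	/* ... radix_tree_insert() of the new handle:vma entry ... */
	mutex_unlock(&ctx->lut_mutex);
	return 0;
}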


@@ -101,8 +101,7 @@ static void lut_close(struct i915_gem_context *ctx)
struct radix_tree_iter iter;
void __rcu **slot;
lockdep_assert_held(&ctx->mutex);
mutex_lock(&ctx->lut_mutex);
rcu_read_lock();
radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
struct i915_vma *vma = rcu_dereference_raw(*slot);
@@ -135,6 +134,7 @@ static void lut_close(struct i915_gem_context *ctx)
i915_gem_object_put(obj);
}
rcu_read_unlock();
mutex_unlock(&ctx->lut_mutex);
}
static struct intel_context *
@@ -342,6 +342,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
spin_unlock(&ctx->i915->gem.contexts.lock);
mutex_destroy(&ctx->engines_mutex);
mutex_destroy(&ctx->lut_mutex);
if (ctx->timeline)
intel_timeline_put(ctx->timeline);
@@ -725,6 +726,7 @@ __create_context(struct drm_i915_private *i915)
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
mutex_init(&ctx->lut_mutex);
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
@@ -1312,11 +1314,11 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
if (vm == rcu_access_pointer(ctx->vm))
goto unlock;
old = __set_ppgtt(ctx, vm);
/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
lut_close(ctx);
old = __set_ppgtt(ctx, vm);
/*
* We need to flush any requests using the current ppgtt before
* we release it as the requests do not hold a reference themselves,
@@ -1330,6 +1332,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
if (err) {
i915_vm_close(__set_ppgtt(ctx, old));
i915_vm_close(old);
lut_close(ctx); /* force a rebuild of the old obj:vma cache */
}
unlock:


@@ -170,6 +170,7 @@ struct i915_gem_context {
* per vm, which may be one per context or shared with the global GTT)
*/
struct radix_tree_root handles_vma;
struct mutex lut_mutex;
/**
* @name: arbitrary name, used for user debug


@@ -782,10 +782,15 @@ static int __eb_add_lut(struct i915_execbuffer *eb,
/* Check that the context hasn't been closed in the meantime */
err = -EINTR;
if (!mutex_lock_interruptible(&ctx->mutex)) {
err = -ENOENT;
if (likely(!i915_gem_context_is_closed(ctx)))
if (!mutex_lock_interruptible(&ctx->lut_mutex)) {
struct i915_address_space *vm = rcu_access_pointer(ctx->vm);
if (unlikely(vm && vma->vm != vm))
err = -EAGAIN; /* user racing with ctx set-vm */
else if (likely(!i915_gem_context_is_closed(ctx)))
err = radix_tree_insert(&ctx->handles_vma, handle, vma);
else
err = -ENOENT;
if (err == 0) { /* And nor has this handle */
struct drm_i915_gem_object *obj = vma->obj;
@@ -798,7 +803,7 @@ static int __eb_add_lut(struct i915_execbuffer *eb,
}
spin_unlock(&obj->lut_lock);
}
mutex_unlock(&ctx->mutex);
mutex_unlock(&ctx->lut_mutex);
}
if (unlikely(err))
goto err;
@@ -814,6 +819,8 @@ err:
static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
{
struct i915_address_space *vm = eb->context->vm;
do {
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -821,7 +828,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
rcu_read_lock();
vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
if (likely(vma))
if (likely(vma && vma->vm == vm))
vma = i915_vma_tryget(vma);
rcu_read_unlock();
if (likely(vma))
@@ -831,7 +838,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
if (unlikely(!obj))
return ERR_PTR(-ENOENT);
vma = i915_vma_instance(obj, eb->context->vm, NULL);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return vma;


@@ -143,14 +143,14 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
* vma, in the same fd namespace, by virtue of flink/open.
*/
mutex_lock(&ctx->mutex);
mutex_lock(&ctx->lut_mutex);
vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
if (vma) {
GEM_BUG_ON(vma->obj != obj);
GEM_BUG_ON(!atomic_read(&vma->open_count));
i915_vma_close(vma);
}
mutex_unlock(&ctx->mutex);
mutex_unlock(&ctx->lut_mutex);
i915_gem_context_put(lut->ctx);
i915_lut_handle_free(lut);


@@ -23,6 +23,8 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
mutex_init(&ctx->mutex);
spin_lock_init(&ctx->stale.lock);
INIT_LIST_HEAD(&ctx->stale.engines);
@@ -35,7 +37,7 @@ mock_context(struct drm_i915_private *i915,
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
mutex_init(&ctx->mutex);
mutex_init(&ctx->lut_mutex);
if (name) {
struct i915_ppgtt *ppgtt;