drm/i915/gt: Provide a local intel_context.vm

Track the currently bound address space used by the HW context. Minor
conversions to use the local intel_context.vm are made here; some
further surgery is still required to make intel_context the primary
interface throughout the selftests.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190730143209.4549-2-chris@chris-wilson.co.uk
commit f5d974f9d2 (parent c082afac86)
Chris Wilson, 2019-07-30 15:32:09 +01:00
10 changed files with 31 additions and 32 deletions
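Before the per-file diffs, a brief orientation: the patch makes each HW
context hold its own counted reference on the address space it executes
in, taken at intel_context_init() (falling back to the GGTT when the GEM
context has no private ppGTT) and dropped at intel_context_fini(), so
every consumer can rely on ce->vm being non-NULL. Below is a minimal,
self-contained sketch of that ownership pattern; it is not kernel code,
and its types and helpers are simplified stand-ins for the real
i915_vm_get()/i915_vm_put() machinery.

/*
 * Standalone sketch (not kernel code): the HW context owns a reference
 * on its address space for its whole lifetime, so ce->vm is never NULL.
 */
#include <assert.h>
#include <stdio.h>

struct address_space {
        int refcount;
        const char *name;
};

static struct address_space *vm_get(struct address_space *vm)
{
        vm->refcount++;
        return vm;
}

static void vm_put(struct address_space *vm)
{
        if (--vm->refcount == 0)
                printf("released %s\n", vm->name);
}

struct hw_context {
        struct address_space *vm; /* always valid once initialised */
};

/* ctx_vm may be NULL (no private ppGTT): fall back to the global GTT */
static void context_init(struct hw_context *ce,
                         struct address_space *ctx_vm,
                         struct address_space *ggtt)
{
        ce->vm = vm_get(ctx_vm ? ctx_vm : ggtt);
}

static void context_fini(struct hw_context *ce)
{
        vm_put(ce->vm);
}

int main(void)
{
        struct address_space ggtt = { .refcount = 1, .name = "ggtt" };
        struct hw_context ce;

        context_init(&ce, NULL, &ggtt); /* no ppGTT: bound to the GGTT */
        assert(ce.vm == &ggtt && ggtt.refcount == 2);
        context_fini(&ce);
        assert(ggtt.refcount == 1);
        return 0;
}

Holding the reference on the intel_context itself, rather than chasing
ce->gem_context->vm at each use, is what lets the NULL checks and GGTT
fallbacks in the diffs below collapse into plain ce->vm dereferences.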

drivers/gpu/drm/i915/gem/i915_gem_client_blt.c

@@ -250,13 +250,11 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
                                      struct i915_page_sizes *page_sizes,
                                      u32 value)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
-       struct i915_gem_context *ctx = ce->gem_context;
-       struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
        struct clear_pages_work *work;
        struct i915_sleeve *sleeve;
        int err;

-       sleeve = create_sleeve(vm, obj, pages, page_sizes);
+       sleeve = create_sleeve(ce->vm, obj, pages, page_sizes);
        if (IS_ERR(sleeve))
                return PTR_ERR(sleeve);

drivers/gpu/drm/i915/gem/i915_gem_context.c

@@ -475,10 +475,18 @@ static struct i915_address_space *
 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
 {
        struct i915_address_space *old = ctx->vm;
+       struct i915_gem_engines_iter it;
+       struct intel_context *ce;

        ctx->vm = i915_vm_get(vm);
        ctx->desc_template = default_desc_template(ctx->i915, vm);

+       for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+               i915_vm_put(ce->vm);
+               ce->vm = i915_vm_get(vm);
+       }
+       i915_gem_context_unlock_engines(ctx);
+
        return old;
 }
@@ -1004,7 +1012,7 @@ static void set_ppgtt_barrier(void *data)

 static int emit_ppgtt_update(struct i915_request *rq, void *data)
 {
-       struct i915_address_space *vm = rq->gem_context->vm;
+       struct i915_address_space *vm = rq->hw_context->vm;
        struct intel_engine_cs *engine = rq->engine;
        u32 base = engine->mmio_base;
        u32 *cs;
@@ -1113,9 +1121,8 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
                                    set_ppgtt_barrier,
                                    old);
        if (err) {
-               ctx->vm = old;
-               ctx->desc_template = default_desc_template(ctx->i915, old);
-               i915_vm_put(vm);
+               i915_vm_put(__set_ppgtt(ctx, old));
+               i915_vm_put(old);
        }

 unlock:

drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c

@@ -223,7 +223,6 @@ struct i915_execbuffer {
        struct intel_engine_cs *engine; /** engine to queue the request to */
        struct intel_context *context; /* logical state for the request */
        struct i915_gem_context *gem_context; /** caller's context */
-       struct i915_address_space *vm; /** GTT and vma for the request */

        struct i915_request *request; /** our request to build */
        struct i915_vma *batch; /** identity of the batch obj/vma */
@@ -697,7 +696,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
                case 1:
                        /* Too fragmented, unbind everything and retry */
-                       err = i915_gem_evict_vm(eb->vm);
+                       err = i915_gem_evict_vm(eb->context->vm);
                        if (err)
                                return err;
                        break;
@@ -725,12 +724,8 @@ static int eb_select_context(struct i915_execbuffer *eb)
                return -ENOENT;

        eb->gem_context = ctx;
-       if (ctx->vm) {
-               eb->vm = ctx->vm;
+       if (ctx->vm)
                eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
-       } else {
-               eb->vm = &eb->i915->ggtt.vm;
-       }

        eb->context_flags = 0;
        if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
@@ -832,7 +827,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
                        goto err_vma;
                }

-               vma = i915_vma_instance(obj, eb->vm, NULL);
+               vma = i915_vma_instance(obj, eb->context->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_obj;

drivers/gpu/drm/i915/gem/i915_gem_object_blt.c

@@ -47,15 +47,11 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
                              struct intel_context *ce,
                              u32 value)
 {
-       struct drm_i915_private *i915 = to_i915(obj->base.dev);
-       struct i915_gem_context *ctx = ce->gem_context;
-       struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
        struct i915_request *rq;
        struct i915_vma *vma;
        int err;

-       /* XXX: ce->vm please */
-       vma = i915_vma_instance(obj, vm, NULL);
+       vma = i915_vma_instance(obj, ce->vm, NULL);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c

@@ -747,7 +747,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
        GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));

-       vma = i915_vma_instance(obj, ce->gem_context->vm, NULL);
+       vma = i915_vma_instance(obj, ce->vm, NULL);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

drivers/gpu/drm/i915/gt/intel_context.c

@@ -191,6 +191,8 @@ intel_context_init(struct intel_context *ce,
        kref_init(&ce->ref);

        ce->gem_context = ctx;
+       ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
+
        ce->engine = engine;
        ce->ops = engine->cops;
        ce->sseu = engine->sseu;
@@ -206,6 +208,8 @@ intel_context_init(struct intel_context *ce,

 void intel_context_fini(struct intel_context *ce)
 {
+       i915_vm_put(ce->vm);
+
        mutex_destroy(&ce->pin_mutex);
        i915_active_fini(&ce->active);
 }

drivers/gpu/drm/i915/gt/intel_context_types.h

@@ -36,7 +36,6 @@ struct intel_context_ops {
 struct intel_context {
        struct kref ref;

-       struct i915_gem_context *gem_context;
        struct intel_engine_cs *engine;
        struct intel_engine_cs *inflight;
 #define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2)
@@ -44,6 +43,9 @@ struct intel_context {
 #define intel_context_inflight_inc(ce) ptr_count_inc(&(ce)->inflight)
 #define intel_context_inflight_dec(ce) ptr_count_dec(&(ce)->inflight)

+       struct i915_address_space *vm;
+       struct i915_gem_context *gem_context;
+
        struct list_head signal_link;
        struct list_head signals;

drivers/gpu/drm/i915/gt/intel_lrc.c

@@ -1605,8 +1605,6 @@ __execlists_context_pin(struct intel_context *ce,
        void *vaddr;
        int ret;

-       GEM_BUG_ON(!ce->gem_context->vm);
-
        ret = execlists_context_deferred_alloc(ce, engine);
        if (ret)
                goto err;
@@ -1716,8 +1714,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)

 static int emit_pdps(struct i915_request *rq)
 {
        const struct intel_engine_cs * const engine = rq->engine;
-       struct i915_ppgtt * const ppgtt =
-               i915_vm_to_ppgtt(rq->gem_context->vm);
+       struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->hw_context->vm);
        int err, i;
        u32 *cs;
@@ -1790,7 +1787,7 @@ static int execlists_request_alloc(struct i915_request *request)
         */

        /* Unconditionally invalidate GPU caches and TLBs. */
-       if (i915_vm_is_4lvl(request->gem_context->vm))
+       if (i915_vm_is_4lvl(request->hw_context->vm))
                ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
        else
                ret = emit_pdps(request);
@@ -2920,7 +2917,7 @@ static void execlists_init_reg_state(u32 *regs,
                                      struct intel_context *ce,
                                      struct intel_engine_cs *engine,
                                      struct intel_ring *ring)
 {
-       struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->gem_context->vm);
+       struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
        bool rcs = engine->class == RENDER_CLASS;
        u32 base = engine->mmio_base;

drivers/gpu/drm/i915/gt/intel_ringbuffer.c

@@ -1380,9 +1380,9 @@ static struct i915_address_space *vm_alias(struct intel_context *ce)
 {
        struct i915_address_space *vm;

-       vm = ce->gem_context->vm;
-       if (!vm)
-               vm = &ce->engine->gt->ggtt->alias->vm;
+       vm = ce->vm;
+       if (i915_is_ggtt(vm))
+               vm = &i915_vm_to_ggtt(vm)->alias->vm;

        return vm;
 }
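A side effect visible in vm_alias() above: since ce->vm is now never
NULL, the aliasing-ppGTT case is detected by asking whether the bound vm
is the GGTT, not by a NULL pointer check. A tiny stand-in sketch of that
predicate change (the is_ggtt flag and alias pointer are simplifications
of the real i915_is_ggtt()/i915_vm_to_ggtt() helpers):

struct address_space {
        int is_ggtt;                 /* stands in for i915_is_ggtt(vm) */
        struct address_space *alias; /* aliasing ppGTT hung off the GGTT */
};

static struct address_space *vm_alias(struct address_space *vm)
{
        if (vm->is_ggtt)        /* was: if (!vm), impossible now */
                vm = vm->alias;
        return vm;
}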

drivers/gpu/drm/i915/gvt/scheduler.c

@@ -1156,7 +1156,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
        intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);

-       i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->gem_context->vm));
+       i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
        for_each_engine(engine, vgpu->gvt->dev_priv, id)
                intel_context_unpin(s->shadow[id]);