linux-stable/drivers/gpu/drm/i915/gem/i915_gem_context.h

commit a0e047156c ("drm/i915/gem: Make context persistence optional")
Author: Chris Wilson

Our existing behaviour is to allow contexts and their GPU requests to
persist past the point of closure until the requests are complete. This
allows clients to operate in a 'fire-and-forget' manner where they can
set up a rendering pipeline, hand it over to the display server and
immediately exit. As the rendering pipeline is kept alive until
completion, the display server (or other consumer) can use the results
in the future and present them to the user.

The compute model is a little different. Compute clients have little to no
buffer sharing between processes, as their kernels tend to operate on a
continuous stream, feeding the results back to the client application.
These kernels run for an indeterminate length of time, and many clients
want their kernels to keep running for as long as they keep feeding in
data, i.e. acting like a DSP.

Not all clients want this persistent "desktop" behaviour and would prefer
that contexts are cleaned up immediately upon closure. This ensures that
when clients are run without hangchecking (e.g. for compute kernels of
indeterminate runtime), any GPU hang or other unexpected workload is
terminated along with the process and does not continue to hog resources.

The default behaviour for new contexts is the legacy persistence mode,
as some desktop applications are dependent upon the existing behaviour.
New clients will have to opt in to immediate cleanup on context
closure. If the hangchecking modparam is disabled, so is persistent
context support -- all contexts will be terminated on closure.

We expect this behaviour change to be welcomed by compute users, who
have often been caught between a rock and a hard place. They disable
hangchecking to avoid their kernels being "unfairly" declared hung, but
have also experienced true hangs that the system was then unable to
clean up. Naturally, this leads to bug reports.

Testcase: igt/gem_ctx_persistence
Link: https://github.com/intel/compute-runtime/pull/228
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Jon Bloomfield <jon.bloomfield@intel.com>
Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Acked-by: Jason Ekstrand <jason@jlekstrand.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20191029202338.8841-1-chris@chris-wilson.co.uk
2019-10-29 21:02:52 +00:00

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_CONTEXT_H__
#define __I915_GEM_CONTEXT_H__

#include "i915_gem_context_types.h"

#include "gt/intel_context.h"

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"

struct drm_device;
struct drm_file;

static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
	set_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}
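
/*
 * UCONTEXT_PERSISTENCE controls whether a context's requests are allowed to
 * persist past the point of closure (the legacy default) or are terminated
 * along with the context. Userspace may clear it to opt in to immediate
 * cleanup; persistence is also unavailable when hangchecking is disabled.
 */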
static inline bool i915_gem_context_is_persistent(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}

static inline void i915_gem_context_set_persistence(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_persistence(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_BANNED, &ctx->flags);
}

static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
{
	set_bit(CONTEXT_BANNED, &ctx->flags);
}

static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline bool
i915_gem_context_user_engines(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline void
i915_gem_context_set_user_engines(struct i915_gem_context *ctx)
{
	set_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline void
i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
{
	clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline bool
i915_gem_context_nopreempt(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_NOPREEMPT, &ctx->flags);
}

static inline void
i915_gem_context_set_nopreempt(struct i915_gem_context *ctx)
{
	set_bit(CONTEXT_NOPREEMPT, &ctx->flags);
}

static inline void
i915_gem_context_clear_nopreempt(struct i915_gem_context *ctx)
{
	clear_bit(CONTEXT_NOPREEMPT, &ctx->flags);
}

static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
{
	return !ctx->file_priv;
}

/* i915_gem_context.c */
int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
void i915_gem_driver_release__contexts(struct drm_i915_private *i915);

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);

void i915_gem_context_release(struct kref *ctx_ref);

int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file);
int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file);

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);

struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);

static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_release);
}

static inline struct i915_address_space *
i915_gem_context_vm(struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
}

static inline struct i915_address_space *
i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
{
	struct i915_address_space *vm;

	rcu_read_lock();
	vm = rcu_dereference(ctx->vm);
	if (!vm)
		vm = &ctx->i915->ggtt.vm;
	vm = i915_vm_get(vm);
	rcu_read_unlock();

	return vm;
}

static inline struct i915_gem_engines *
i915_gem_context_engines(struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines,
					 lockdep_is_held(&ctx->engines_mutex));
}

static inline struct i915_gem_engines *
i915_gem_context_lock_engines(struct i915_gem_context *ctx)
	__acquires(&ctx->engines_mutex)
{
	mutex_lock(&ctx->engines_mutex);
	return i915_gem_context_engines(ctx);
}

static inline void
i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
	__releases(&ctx->engines_mutex)
{
	mutex_unlock(&ctx->engines_mutex);
}

static inline struct intel_context *
i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
{
	struct intel_context *ce = ERR_PTR(-EINVAL);

	rcu_read_lock(); {
		struct i915_gem_engines *e = rcu_dereference(ctx->engines);
		if (likely(idx < e->num_engines && e->engines[idx]))
			ce = intel_context_get(e->engines[idx]);
	} rcu_read_unlock();

	return ce;
}

static inline void
i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
			   struct i915_gem_engines *engines)
{
	GEM_BUG_ON(!engines);
	it->engines = engines;
	it->idx = 0;
}

struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);
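
/*
 * Iterate over the engines in an i915_gem_engines set; callers typically
 * obtain (and thereby pin) the set via i915_gem_context_lock_engines() and
 * drop it with i915_gem_context_unlock_engines() once done.
 */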
#define for_each_gem_engine(ce, engines, it) \
	for (i915_gem_engines_iter_init(&(it), (engines)); \
	     ((ce) = i915_gem_engines_iter_next(&(it)));)

struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);

#endif /* !__I915_GEM_CONTEXT_H__ */