mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-29 05:44:11 +00:00
11ecbdddf2
On Gen11 powergating half the execution units is a functional requirement when using the VME samplers. Not fulfilling this requirement can lead to hangs. This unfortunately plays fairly poorly with the NOA requirements. NOA requires a stable power configuration to maintain its configuration. As a result using OA (and NOA feeding into it) so far has required us to use a power configuration that can work for all contexts. The only power configuration fulfilling this is powergating half the execution units. This makes performance analysis for 3D workloads somewhat pointless. Failing to find a solution that would work for everybody, this change introduces a new i915-perf stream open parameter that punts the decision off to userspace. If this parameter is omitted, the existing Gen11 behavior remains (half EU array powergating). This change takes the initiative to move all perf related sseu configuration into i915_perf.c v2: Make parameter privileged if different from default v3: Fix context modifying its sseu config while i915-perf is enabled v4: Always consider global sseu a privileged operation (Tvrtko) Override req_sseu point in intel_sseu_make_rpcs() (Tvrtko) Remove unrelated changes (Tvrtko) v5: Some typos (Tvrtko) Process sseu param in read_properties_unlocked() (Tvrtko) v6: Actually commit the bits from v5... Fixup some checkpath warnings v7: Only compare engine uabi field (Chris) Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com> Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20200317132222.2638719-3-lionel.g.landwerlin@intel.com
232 lines
6.2 KiB
C
232 lines
6.2 KiB
C
/*
|
|
* SPDX-License-Identifier: MIT
|
|
*
|
|
* Copyright © 2016 Intel Corporation
|
|
*/
|
|
|
|
#ifndef __I915_GEM_CONTEXT_H__
|
|
#define __I915_GEM_CONTEXT_H__
|
|
|
|
#include "i915_gem_context_types.h"
|
|
|
|
#include "gt/intel_context.h"
|
|
|
|
#include "i915_drv.h"
|
|
#include "i915_gem.h"
|
|
#include "i915_scheduler.h"
|
|
#include "intel_device_info.h"
|
|
|
|
struct drm_device;
|
|
struct drm_file;
|
|
|
|
/* True once CONTEXT_CLOSED has been set in ctx->flags. */
static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_CLOSED, &ctx->flags);
}
|
|
|
|
/* Mark the context as closed. Closing is one-way: closing twice is a driver bug. */
static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
	set_bit(CONTEXT_CLOSED, &ctx->flags);
}
|
|
|
|
/* True if userspace requested this context be skipped for error capture. */
static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}
|
|
|
|
/* Set the userspace-controlled no-error-capture flag. */
static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}
|
|
|
|
/* Clear the userspace-controlled no-error-capture flag. */
static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}
|
|
|
|
/* True if the UCONTEXT_BANNABLE user flag is set on this context. */
static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}
|
|
|
|
/* Set the UCONTEXT_BANNABLE user flag. */
static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}
|
|
|
|
/* Clear the UCONTEXT_BANNABLE user flag. */
static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}
|
|
|
|
/* True if the UCONTEXT_RECOVERABLE user flag is set on this context. */
static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}
|
|
|
|
/* Set the UCONTEXT_RECOVERABLE user flag. */
static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}
|
|
|
|
/* Clear the UCONTEXT_RECOVERABLE user flag. */
static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}
|
|
|
|
/* True if the UCONTEXT_PERSISTENCE user flag is set on this context. */
static inline bool i915_gem_context_is_persistent(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}
|
|
|
|
/* Set the UCONTEXT_PERSISTENCE user flag. */
static inline void i915_gem_context_set_persistence(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}
|
|
|
|
/* Clear the UCONTEXT_PERSISTENCE user flag. */
static inline void i915_gem_context_clear_persistence(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}
|
|
|
|
/*
 * True if CONTEXT_USER_ENGINES is set in ctx->flags, i.e. the engine set
 * was configured explicitly rather than left at the default.
 */
static inline bool
i915_gem_context_user_engines(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}
|
|
|
|
/* Record that the context's engine set was configured by userspace. */
static inline void
i915_gem_context_set_user_engines(struct i915_gem_context *ctx)
{
	set_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}
|
|
|
|
/* Revert to the default (non user-configured) engine set flag. */
static inline void
i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
{
	clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}
|
|
|
|
/* i915_gem_context.c */
|
|
void i915_gem_init__contexts(struct drm_i915_private *i915);
|
|
void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
|
|
|
|
int i915_gem_context_open(struct drm_i915_private *i915,
|
|
struct drm_file *file);
|
|
void i915_gem_context_close(struct drm_file *file);
|
|
|
|
void i915_gem_context_release(struct kref *ctx_ref);
|
|
|
|
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
|
|
struct drm_file *file);
|
|
int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
|
|
struct drm_file *file);
|
|
|
|
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
|
|
struct drm_file *file);
|
|
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
|
|
struct drm_file *file);
|
|
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
|
|
struct drm_file *file_priv);
|
|
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
|
|
struct drm_file *file_priv);
|
|
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
|
|
struct drm_file *file);
|
|
|
|
/*
 * Acquire an extra reference on @ctx and return it, allowing the call to
 * be chained. Release with i915_gem_context_put().
 */
static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}
|
|
|
|
/* Drop a reference; i915_gem_context_release() runs when the last is gone. */
static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_release);
}
|
|
|
|
/*
 * Dereference the context's RCU-managed vm pointer. The caller must hold
 * ctx->mutex (enforced via lockdep); no reference is taken.
 */
static inline struct i915_address_space *
i915_gem_context_vm(struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
}
|
|
|
|
/*
 * Acquire a reference to the context's address space, using RCU to
 * serialise against the pointer being swapped out. If the context has no
 * private vm, fall back to the device's global GTT vm. The reference is
 * taken inside the RCU read-side critical section, before the pointer can
 * be retired; the caller presumably releases it with i915_vm_put() —
 * NOTE(review): confirm against i915_vm_get()/put() semantics.
 */
static inline struct i915_address_space *
i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
{
	struct i915_address_space *vm;

	rcu_read_lock();
	vm = rcu_dereference(ctx->vm);
	if (!vm)
		vm = &ctx->i915->ggtt.vm; /* no private vm: use the global GTT */
	vm = i915_vm_get(vm);
	rcu_read_unlock();

	return vm;
}
|
|
|
|
/*
 * Dereference the context's RCU-managed engine array. The caller must hold
 * ctx->engines_mutex (enforced via lockdep); see
 * i915_gem_context_lock_engines() for the combined lock-and-lookup helper.
 */
static inline struct i915_gem_engines *
i915_gem_context_engines(struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines,
					 lockdep_is_held(&ctx->engines_mutex));
}
|
|
|
|
/*
 * Lock ctx->engines_mutex and return the engine array. Pair with
 * i915_gem_context_unlock_engines(). The __acquires annotation lets sparse
 * check lock balance.
 */
static inline struct i915_gem_engines *
i915_gem_context_lock_engines(struct i915_gem_context *ctx)
	__acquires(&ctx->engines_mutex)
{
	mutex_lock(&ctx->engines_mutex);
	return i915_gem_context_engines(ctx);
}
|
|
|
|
/* Release the lock taken by i915_gem_context_lock_engines(). */
static inline void
i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
	__releases(&ctx->engines_mutex)
{
	mutex_unlock(&ctx->engines_mutex);
}
|
|
|
|
static inline struct intel_context *
|
|
i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
|
|
{
|
|
struct intel_context *ce;
|
|
|
|
rcu_read_lock(); {
|
|
struct i915_gem_engines *e = rcu_dereference(ctx->engines);
|
|
if (unlikely(!e)) /* context was closed! */
|
|
ce = ERR_PTR(-ENOENT);
|
|
else if (likely(idx < e->num_engines && e->engines[idx]))
|
|
ce = intel_context_get(e->engines[idx]);
|
|
else
|
|
ce = ERR_PTR(-EINVAL);
|
|
} rcu_read_unlock();
|
|
|
|
return ce;
|
|
}
|
|
|
|
/*
 * Initialise an engines iterator at the start of @engines. Used by
 * for_each_gem_engine(); stepping is done by i915_gem_engines_iter_next().
 */
static inline void
i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
			   struct i915_gem_engines *engines)
{
	it->engines = engines;
	it->idx = 0;
}
|
|
|
|
struct intel_context *
|
|
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);
|
|
|
|
/*
 * Iterate over @engines, assigning each engine context in turn to @ce.
 * Terminates when i915_gem_engines_iter_next() returns NULL.
 */
#define for_each_gem_engine(ce, engines, it) \
	for (i915_gem_engines_iter_init(&(it), (engines)); \
	     ((ce) = i915_gem_engines_iter_next(&(it)));)
|
|
|
|
struct i915_lut_handle *i915_lut_handle_alloc(void);
|
|
void i915_lut_handle_free(struct i915_lut_handle *lut);
|
|
|
|
int i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
|
|
const struct drm_i915_gem_context_param_sseu *user,
|
|
struct intel_sseu *context);
|
|
|
|
#endif /* !__I915_GEM_CONTEXT_H__ */
|