2005-04-16 22:20:36 +00:00
|
|
|
/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
|
|
|
|
*/
|
2006-01-02 09:14:23 +00:00
|
|
|
/*
|
2005-06-23 12:46:46 +00:00
|
|
|
*
|
2005-04-16 22:20:36 +00:00
|
|
|
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
|
|
|
|
* All Rights Reserved.
|
2005-06-23 12:46:46 +00:00
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the
|
|
|
|
* "Software"), to deal in the Software without restriction, including
|
|
|
|
* without limitation the rights to use, copy, modify, merge, publish,
|
|
|
|
* distribute, sub license, and/or sell copies of the Software, and to
|
|
|
|
* permit persons to whom the Software is furnished to do so, subject to
|
|
|
|
* the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the
|
|
|
|
* next paragraph) shall be included in all copies or substantial portions
|
|
|
|
* of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
|
|
|
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
|
|
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
|
|
|
|
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
|
|
|
|
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
|
|
|
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
|
|
|
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
|
|
*
|
2006-01-02 09:14:23 +00:00
|
|
|
*/
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
#ifndef _I915_DRV_H_
|
|
|
|
#define _I915_DRV_H_
|
|
|
|
|
2012-12-03 21:03:14 +00:00
|
|
|
#include <uapi/drm/i915_drm.h>
|
|
|
|
|
drm/i915: irq-drive the dp aux communication
At least on the platforms that have a dp aux irq and also have it
enabled - vlvhsw should have one, too. But I don't have a machine to
test this on. Judging from docs there's no dp aux interrupt for gm45.
Also, I only have an ivb cpu edp machine, so the dp aux A code for
snb/ilk is untested.
For dpcd probing when nothing is connected it slashes about 5ms of cpu
time (cpu time is now negligible), which agrees with 3 * 5 400 usec
timeouts.
A previous version of this patch increases the time required to go
through the dp_detect cycle (which includes reading the edid) from
around 33 ms to around 40 ms. Experiments indicated that this is
purely due to the irq latency - the hw doesn't allow us to queue up
dp aux transactions and hence irq latency directly affects throughput.
gmbus is much better, there we have a 8 byte buffer, and we get the
irq once another 4 bytes can be queued up.
But by using the pm_qos interface to request the lowest possible cpu
wake-up latency this slowdown completely disappeared.
Since all our output detection logic is single-threaded with the
mode_config mutex right now anyway, I've decide not ot play fancy and
to just reuse the gmbus wait queue. But this would definitely prep the
way to run dp detection on different ports in parallel
v2: Add a timeout for dp aux transfers when using interrupts - the hw
_does_ prevent this with the hw-based 400 usec timeout, but if the
irq somehow doesn't arrive we're screwed. Lesson learned while
developing this ;-)
v3: While at it also convert the busy-loop to wait_for_atomic, so that
we don't run the risk of an infinite loop any more.
v4: Ensure we have the smallest possible irq latency by using the
pm_qos interface.
v5: Add a comment to the code to explain why we frob pm_qos. Suggested
by Chris Wilson.
v6: Disable dp irq for vlv, that's easier than trying to get at docs
and hw.
v7: Squash in a fix for Haswell that Paulo Zanoni tracked down - the
dp aux registers aren't at a fixed offset any more, but can be on the
PCH while the DP port is on the cpu die.
Reviewed-by: Imre Deak <imre.deak@intel.com> (v6)
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2012-12-01 12:53:48 +00:00
|
|
|
#include <linux/pm_qos.h>
|
2016-04-13 16:35:01 +00:00
|
|
|
|
2021-06-02 08:38:08 +00:00
|
|
|
#include <drm/ttm/ttm_device.h>
|
2016-04-13 16:35:01 +00:00
|
|
|
|
2023-01-16 16:46:44 +00:00
|
|
|
#include "display/intel_display_limits.h"
|
2022-08-24 13:15:27 +00:00
|
|
|
#include "display/intel_display_core.h"
|
2019-06-13 08:44:16 +00:00
|
|
|
|
2019-08-08 13:42:48 +00:00
|
|
|
#include "gem/i915_gem_context_types.h"
|
2019-08-08 13:42:49 +00:00
|
|
|
#include "gem/i915_gem_shrinker.h"
|
2019-08-08 13:42:48 +00:00
|
|
|
#include "gem/i915_gem_stolen.h"
|
|
|
|
|
2019-04-24 17:48:39 +00:00
|
|
|
#include "gt/intel_engine.h"
|
2019-06-21 07:07:40 +00:00
|
|
|
#include "gt/intel_gt_types.h"
|
2021-01-12 16:43:00 +00:00
|
|
|
#include "gt/intel_region_lmem.h"
|
2019-04-24 17:48:39 +00:00
|
|
|
#include "gt/intel_workarounds.h"
|
2019-07-13 10:00:11 +00:00
|
|
|
#include "gt/uc/intel_uc.h"
|
2019-04-24 17:48:39 +00:00
|
|
|
|
2022-12-08 14:23:47 +00:00
|
|
|
#include "soc/intel_pch.h"
|
|
|
|
|
2022-04-01 14:21:58 +00:00
|
|
|
#include "i915_drm_client.h"
|
2022-02-10 15:45:52 +00:00
|
|
|
#include "i915_gem.h"
|
|
|
|
#include "i915_gpu_error.h"
|
|
|
|
#include "i915_params.h"
|
|
|
|
#include "i915_perf_types.h"
|
|
|
|
#include "i915_scheduler.h"
|
|
|
|
#include "i915_utils.h"
|
2017-12-21 21:57:32 +00:00
|
|
|
#include "intel_device_info.h"
|
2021-03-26 13:21:32 +00:00
|
|
|
#include "intel_memory_region.h"
|
2019-04-29 12:29:36 +00:00
|
|
|
#include "intel_runtime_pm.h"
|
2021-03-26 13:21:32 +00:00
|
|
|
#include "intel_step.h"
|
2017-12-21 21:57:31 +00:00
|
|
|
#include "intel_uncore.h"
|
2016-04-13 16:35:01 +00:00
|
|
|
|
2022-02-03 14:02:33 +00:00
|
|
|
struct drm_i915_clock_gating_funcs;
|
2022-02-03 14:02:27 +00:00
|
|
|
struct vlv_s0ix_state;
|
drm/i915/pxp: Promote pxp subsystem to top-level of i915
Starting with MTL, there will be two GT-tiles, a render and media
tile. PXP as a service for supporting workloads with protected
contexts and protected buffers can be subscribed by process
workloads on any tile. However, depending on the platform,
only one of the tiles is used for control events pertaining to PXP
operation (such as creating the arbitration session and session
tear-down).
PXP as a global feature is accessible via batch buffer instructions
on any engine/tile and the coherency across tiles is handled implicitly
by the HW. In fact, for the foreseeable future, we are expecting this
single-control-tile for the PXP subsystem.
In MTL, it's the standalone media tile (not the root tile) because
it contains the VDBOX and KCR engine (among the assets PXP relies on
for those events).
Looking at the current code design, each tile is represented by the
intel_gt structure while the intel_pxp structure currently hangs off the
intel_gt structure.
Keeping the intel_pxp structure within the intel_gt structure makes some
internal functionalities more straight forward but adds code complexity to
code readability and maintainibility to many external-to-pxp subsystems
which may need to pick the correct intel_gt structure. An example of this
would be the intel_pxp_is_active or intel_pxp_is_enabled functionality
which should be viewed as a global level inquiry, not a per-gt inquiry.
That said, this series promotes the intel_pxp structure into the
drm_i915_private structure making it a top-level subsystem and the PXP
subsystem will select the control gt internally and keep a pointer to
it for internal reference.
This promotion comes with two noteworthy changes:
1. Exported pxp functions that are called by external subsystems
(such as intel_pxp_enabled/active) will have to check implicitly
if i915->pxp is valid as that structure will not be allocated
for HW that doesn't support PXP.
2. Since GT is now considered a soft-dependency of PXP we are
ensuring that GT init happens before PXP init and vice versa
for fini. This causes a minor ordering change whereby we previously
called intel_pxp_suspend after intel_uc_suspend but now is before
i915_gem_suspend_late but the change is required for correct
dependency flows. Additionally, this re-order change doesn't
have any impact because at that point in either case, the top level
entry to i915 won't observe any PXP events (since the GPU was
quiesced during suspend_prepare). Also, any PXP event doesn't
really matter when we disable the PXP HW (global GT irqs are
already off anyway, so even if there was a bug that generated
spurious events we wouldn't see it and we would just clean it
up on resume which is okay since the default fallback action
for PXP would be to keep the sessions off at this suspend stage).
Changes from prior revs:
v11: - Reformat a comment (Tvrtko).
v10: - Change the code flow for intel_pxp_init to make it more
cleaner and readible with better comments explaining the
difference between full-PXP-feature vs the partial-teelink
inits depending on the platform. Additionally, only do
the pxp allocation when we are certain the subsystem is
needed. (Tvrtko).
v9: - Cosmetic cleanups in supported/enabled/active. (Daniele).
- Add comments for intel_pxp_init and pxp_get_ctrl_gt that
explain the functional flow for when PXP is not supported
but the backend-assets are needed for HuC authentication
(Daniele and Tvrtko).
- Fix two remaining functions that are accessible outside
PXP that need to be checking pxp ptrs before using them:
intel_pxp_irq_handler and intel_pxp_huc_load_and_auth
(Tvrtko and Daniele).
- User helper macro in pxp-debugfs (Tvrtko).
v8: - Remove pxp_to_gt macro (Daniele).
- Fix a bug in pxp_get_ctrl_gt for the case of MTL and we don't
support GSC-FW on it. (Daniele).
- Leave i915->pxp as NULL if we dont support PXP and in line
with that, do additional validity check on i915->pxp for
intel_pxp_is_supported/enabled/active (Daniele).
- Remove unncessary include header from intel_gt_debugfs.c
and check drm_minor i915->drm.primary (Daniele).
- Other cosmetics / minor issues / more comments on suspend
flow order change (Daniele).
v7: - Drop i915_dev_to_pxp and in intel_pxp_init use 'i915->pxp'
through out instead of local variable newpxp. (Rodrigo)
- In the case intel_pxp_fini is called during driver unload but
after i915 loading failed without pxp being allocated, check
i915->pxp before referencing it. (Alan)
v6: - Remove HAS_PXP macro and replace it with intel_pxp_is_supported
because : [1] introduction of 'ctrl_gt' means we correct this
for MTL's upcoming series now. [2] Also, this has little impact
globally as its only used by PXP-internal callers at the moment.
- Change intel_pxp_init/fini to take in i915 as its input to avoid
ptr-to-ptr in init/fini calls.(Jani).
- Remove the backpointer from pxp->i915 since we can use
pxp->ctrl_gt->i915 if we need it. (Rodrigo).
v5: - Switch from series to single patch (Rodrigo).
- change function name from pxp_get_kcr_owner_gt to
pxp_get_ctrl_gt.
- Fix CI BAT failure by removing redundant call to intel_pxp_fini
from driver-remove.
- NOTE: remaining open still persists on using ptr-to-ptr
and back-ptr.
v4: - Instead of maintaining intel_pxp as an intel_gt structure member
and creating a number of convoluted helpers that takes in i915 as
input and redirects to the correct intel_gt or takes any intel_gt
and internally replaces with the correct intel_gt, promote it to
be a top-level i915 structure.
v3: - Rename gt level helper functions to "intel_pxp_is_enabled/
supported/ active_on_gt" (Daniele)
- Upgrade _gt_supports_pxp to replace what was intel_gtpxp_is
supported as the new intel_pxp_is_supported_on_gt to check for
PXP feature support vs the tee support for huc authentication.
Fix pxp-debugfs-registration to use only the former to decide
support. (Daniele)
- Couple minor optimizations.
v2: - Avoid introduction of new device info or gt variables and use
existing checks / macros to differentiate the correct GT->PXP
control ownership (Daniele Ceraolo Spurio)
- Don't reuse the updated global-checkers for per-GT callers (such
as other files within PXP) to avoid unnecessary GT-reparsing,
expose a replacement helper like the prior ones. (Daniele).
v1: - Add one more patch to the series for the intel_pxp suspend/resume
for similar refactoring
References: https://patchwork.freedesktop.org/patch/msgid/20221202011407.4068371-1-alan.previn.teres.alexis@intel.com
Signed-off-by: Alan Previn <alan.previn.teres.alexis@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Acked-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221208180542.998148-1-alan.previn.teres.alexis@intel.com
2022-12-08 18:05:42 +00:00
|
|
|
struct intel_pxp;
|
2019-05-28 09:29:42 +00:00
|
|
|
|
2022-08-24 13:15:59 +00:00
|
|
|
#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)
|
|
|
|
|
2023-01-16 17:34:21 +00:00
|
|
|
/* Data Stolen Memory (DSM) aka "i915 stolen memory" */
|
|
|
|
struct i915_dsm {
|
|
|
|
/*
|
|
|
|
* The start and end of DSM which we can optionally use to create GEM
|
|
|
|
* objects backed by stolen memory.
|
|
|
|
*
|
|
|
|
* Note that usable_size tells us exactly how much of this we are
|
|
|
|
* actually allowed to use, given that some portion of it is in fact
|
|
|
|
* reserved for use by hardware functions.
|
|
|
|
*/
|
|
|
|
struct resource stolen;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reserved portion of DSM.
|
|
|
|
*/
|
|
|
|
struct resource reserved;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Total size minus reserved ranges.
|
|
|
|
*
|
|
|
|
* DSM is segmented in hardware with different portions offlimits to
|
|
|
|
* certain functions.
|
|
|
|
*
|
|
|
|
* The drm_mm is initialised to the total accessible range, as found
|
|
|
|
* from the PCI config. On Broadwell+, this is further restricted to
|
|
|
|
* avoid the first page! The upper end of DSM is reserved for hardware
|
|
|
|
* functions and similarly removed from the accessible range.
|
|
|
|
*/
|
|
|
|
resource_size_t usable_size;
|
|
|
|
};
|
|
|
|
|
2012-11-02 18:55:02 +00:00
|
|
|
struct i915_suspend_saved_registers {
|
2008-05-07 02:27:53 +00:00
|
|
|
u32 saveDSPARB;
|
2007-11-22 04:14:14 +00:00
|
|
|
u32 saveSWF0[16];
|
|
|
|
u32 saveSWF1[16];
|
2015-09-18 17:03:43 +00:00
|
|
|
u32 saveSWF3[3];
|
2014-12-10 20:16:05 +00:00
|
|
|
u16 saveGCDGMBUS;
|
2012-11-02 18:55:02 +00:00
|
|
|
};
|
2012-11-02 18:55:03 +00:00
|
|
|
|
2013-09-19 18:13:41 +00:00
|
|
|
#define MAX_L3_SLICES 2
|
2012-11-02 18:55:07 +00:00
|
|
|
struct intel_l3_parity {
|
2013-09-19 18:13:41 +00:00
|
|
|
u32 *remap_info[MAX_L3_SLICES];
|
2012-11-02 18:55:07 +00:00
|
|
|
struct work_struct error_work;
|
2013-09-19 18:13:41 +00:00
|
|
|
int which_slice;
|
2012-11-02 18:55:07 +00:00
|
|
|
};
|
|
|
|
|
2012-11-14 16:14:03 +00:00
|
|
|
struct i915_gem_mm {
|
2021-04-21 10:46:55 +00:00
|
|
|
/*
|
|
|
|
* Shortcut for the stolen region. This points to either
|
|
|
|
* INTEL_REGION_STOLEN_SMEM for integrated platforms, or
|
|
|
|
* INTEL_REGION_STOLEN_LMEM for discrete, or NULL if the device doesn't
|
|
|
|
* support stolen.
|
|
|
|
*/
|
|
|
|
struct intel_memory_region *stolen_region;
|
2012-11-14 16:14:03 +00:00
|
|
|
/** Memory allocator for GTT stolen memory */
|
|
|
|
struct drm_mm stolen;
|
2015-07-02 22:25:09 +00:00
|
|
|
/** Protects the usage of the GTT stolen memory allocator. This is
|
|
|
|
* always the inner lock when overlapping with struct_mutex. */
|
|
|
|
struct mutex stolen_lock;
|
|
|
|
|
2017-10-16 11:40:37 +00:00
|
|
|
/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
|
|
|
|
spinlock_t obj_lock;
|
|
|
|
|
2012-11-14 16:14:03 +00:00
|
|
|
/**
|
2019-06-12 10:57:20 +00:00
|
|
|
* List of objects which are purgeable.
|
2012-11-14 16:14:03 +00:00
|
|
|
*/
|
2019-06-12 10:57:20 +00:00
|
|
|
struct list_head purge_list;
|
|
|
|
|
2019-05-30 20:34:59 +00:00
|
|
|
/**
|
2019-06-12 10:57:20 +00:00
|
|
|
* List of objects which have allocated pages and are shrinkable.
|
2019-05-30 20:34:59 +00:00
|
|
|
*/
|
2019-06-12 10:57:20 +00:00
|
|
|
struct list_head shrink_list;
|
2012-11-14 16:14:03 +00:00
|
|
|
|
2016-10-28 12:58:42 +00:00
|
|
|
/**
|
|
|
|
* List of objects which are pending destruction.
|
|
|
|
*/
|
|
|
|
struct llist_head free_list;
|
2022-07-26 14:48:44 +00:00
|
|
|
struct work_struct free_work;
|
2018-02-19 22:06:31 +00:00
|
|
|
/**
|
|
|
|
* Count of objects pending destructions. Used to skip needlessly
|
|
|
|
* waiting on an RCU barrier if no objects are waiting to be freed.
|
|
|
|
*/
|
|
|
|
atomic_t free_count;
|
2016-10-28 12:58:42 +00:00
|
|
|
|
2017-10-06 22:18:14 +00:00
|
|
|
/**
|
|
|
|
* tmpfs instance used for shmem backed objects
|
|
|
|
*/
|
|
|
|
struct vfsmount *gemfs;
|
|
|
|
|
2019-10-18 09:07:49 +00:00
|
|
|
struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];
|
|
|
|
|
2014-05-20 07:28:43 +00:00
|
|
|
struct notifier_block oom_notifier;
|
2016-04-04 13:46:43 +00:00
|
|
|
struct notifier_block vmap_notifier;
|
2014-03-25 13:23:04 +00:00
|
|
|
struct shrinker shrinker;
|
2012-11-14 16:14:03 +00:00
|
|
|
|
2021-03-23 15:50:04 +00:00
|
|
|
#ifdef CONFIG_MMU_NOTIFIER
|
2017-06-16 14:05:22 +00:00
|
|
|
/**
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
* notifier_lock for mmu notifiers, memory may not be allocated
|
|
|
|
* while holding this lock.
|
2017-06-16 14:05:22 +00:00
|
|
|
*/
|
2021-06-10 14:35:25 +00:00
|
|
|
rwlock_t notifier_lock;
|
2021-03-23 15:50:04 +00:00
|
|
|
#endif
|
2017-06-16 14:05:22 +00:00
|
|
|
|
2019-05-30 20:35:00 +00:00
|
|
|
/* shrinker accounting, also useful for userland debugging */
|
|
|
|
u64 shrink_memory;
|
|
|
|
u32 shrink_count;
|
2012-11-14 16:14:03 +00:00
|
|
|
};
|
|
|
|
|
2015-02-10 11:05:47 +00:00
|
|
|
struct i915_virtual_gpu {
|
2019-08-23 06:57:31 +00:00
|
|
|
struct mutex lock; /* serialises sending of g2v_notify command pkts */
|
2015-02-10 11:05:47 +00:00
|
|
|
bool active;
|
2017-08-14 07:20:46 +00:00
|
|
|
u32 caps;
|
2022-04-07 07:19:44 +00:00
|
|
|
u32 *initial_mmio;
|
|
|
|
u8 *initial_cfg_space;
|
2022-04-11 14:13:35 +00:00
|
|
|
struct list_head entry;
|
2015-02-10 11:05:47 +00:00
|
|
|
};
|
|
|
|
|
2019-11-01 10:15:28 +00:00
|
|
|
struct i915_selftest_stash {
|
|
|
|
atomic_t counter;
|
2021-06-02 08:38:08 +00:00
|
|
|
struct ida mock_region_instances;
|
2019-11-01 10:15:28 +00:00
|
|
|
};
|
|
|
|
|
2014-03-31 11:27:22 +00:00
|
|
|
struct drm_i915_private {
|
2016-06-24 13:00:18 +00:00
|
|
|
struct drm_device drm;
|
|
|
|
|
2022-08-24 13:15:27 +00:00
|
|
|
struct intel_display display;
|
|
|
|
|
2020-03-23 14:49:07 +00:00
|
|
|
/* FIXME: Device release actions should all be moved to drmm_ */
|
|
|
|
bool do_release;
|
|
|
|
|
2020-06-18 15:04:02 +00:00
|
|
|
/* i915 device parameters */
|
|
|
|
struct i915_params params;
|
|
|
|
|
2018-12-31 14:56:46 +00:00
|
|
|
const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
|
2018-12-31 14:56:41 +00:00
|
|
|
struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
|
2018-02-07 21:05:43 +00:00
|
|
|
struct intel_driver_caps caps;
|
2012-11-02 18:55:02 +00:00
|
|
|
|
2023-01-16 17:34:21 +00:00
|
|
|
struct i915_dsm dsm;
|
2017-12-11 15:18:21 +00:00
|
|
|
|
2013-07-19 19:36:52 +00:00
|
|
|
struct intel_uncore uncore;
|
2019-08-09 06:31:16 +00:00
|
|
|
struct intel_uncore_mmio_debug mmio_debug;
|
2012-11-02 18:55:02 +00:00
|
|
|
|
2015-02-10 11:05:47 +00:00
|
|
|
struct i915_virtual_gpu vgpu;
|
|
|
|
|
2016-10-20 09:15:03 +00:00
|
|
|
struct intel_gvt *gvt;
|
drm/i915: gvt: Introduce the basic architecture of GVT-g
This patch introduces the very basic framework of GVT-g device model,
includes basic prototypes, definitions, initialization.
v12:
- Call intel_gvt_init() in driver early initialization stage. (Chris)
v8:
- Remove the GVT idr and mutex in intel_gvt_host. (Joonas)
v7:
- Refine the URL link in Kconfig. (Joonas)
- Refine the introduction of GVT-g host support in Kconfig. (Joonas)
- Remove the macro GVT_ALIGN(), use round_down() instead. (Joonas)
- Make "struct intel_gvt" a data member in struct drm_i915_private.(Joonas)
- Remove {alloc, free}_gvt_device()
- Rename intel_gvt_{create, destroy}_gvt_device()
- Expost intel_gvt_init_host()
- Remove the dummy "struct intel_gvt" declaration in intel_gvt.h (Joonas)
v6:
- Refine introduction in Kconfig. (Chris)
- The exposed API functions will take struct intel_gvt * instead of
void *. (Chris/Tvrtko)
- Remove most memebers of strct intel_gvt_device_info. Will add them
in the device model patches.(Chris)
- Remove gvt_info() and gvt_err() in debug.h. (Chris)
- Move GVT kernel parameter into i915_params. (Chris)
- Remove include/drm/i915_gvt.h, as GVT-g will be built within i915.
- Remove the redundant struct i915_gvt *, as the functions in i915
will directly take struct intel_gvt *.
- Add more comments for reviewer.
v5:
Take Tvrtko's comments:
- Fix the misspelled words in Kconfig
- Let functions take drm_i915_private * instead of struct drm_device *
- Remove redundant prints/local varible initialization
v3:
Take Joonas' comments:
- Change file name i915_gvt.* to intel_gvt.*
- Move GVT kernel parameter into intel_gvt.c
- Remove redundant debug macros
- Change error handling style
- Add introductions for some stub functions
- Introduce drm/i915_gvt.h.
Take Kevin's comments:
- Move GVT-g host/guest check into intel_vgt_balloon in i915_gem_gtt.c
v2:
- Introduce i915_gvt.c.
It's necessary to introduce the stubs between i915 driver and GVT-g host,
as GVT-g components is configurable in kernel config. When disabled, the
stubs here do nothing.
Take Joonas' comments:
- Replace boolean return value with int.
- Replace customized info/warn/debug macros with DRM macros.
- Document all non-static functions like i915.
- Remove empty and unused functions.
- Replace magic number with marcos.
- Set GVT-g in kernel config to "n" by default.
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1466078825-6662-5-git-send-email-zhi.a.wang@intel.com
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-06-16 12:07:00 +00:00
|
|
|
|
2023-01-17 12:33:06 +00:00
|
|
|
struct {
|
|
|
|
struct pci_dev *pdev;
|
|
|
|
struct resource mch_res;
|
|
|
|
bool mchbar_need_disable;
|
|
|
|
} gmch;
|
2019-08-06 12:43:00 +00:00
|
|
|
|
|
|
|
struct rb_root uabi_engines;
|
2022-04-01 14:22:04 +00:00
|
|
|
unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1];
|
2012-11-02 18:55:02 +00:00
|
|
|
|
|
|
|
/* protects the irq masks */
|
|
|
|
spinlock_t irq_lock;
|
|
|
|
|
2014-03-04 17:23:07 +00:00
|
|
|
bool display_irqs_enabled;
|
|
|
|
|
2015-05-26 17:42:30 +00:00
|
|
|
/* Sideband mailbox protection */
|
|
|
|
struct mutex sb_lock;
|
2019-04-26 08:17:18 +00:00
|
|
|
struct pm_qos_request sb_qos;
|
2012-11-02 18:55:02 +00:00
|
|
|
|
|
|
|
/** Cached value of IMR to avoid reads in updating the bitfield */
|
drm/i915/bdw: Implement interrupt changes
The interrupt handling implementation remains the same as previous
generations with the 4 types of registers, status, identity, mask, and
enable. However the layout of where the bits go have changed entirely.
To address these changes, all of the interrupt vfuncs needed special
gen8 code.
The way it works is there is a top level status register now which
informs the interrupt service routine which unit caused the interrupt,
and therefore which interrupt registers to read to process the
interrupt. For display the division is quite logical, a set of interrupt
registers for each pipe, and in addition to those, a set each for "misc"
and port.
For GT the things get a bit hairy, as seen by the code. Each of the GT
units has it's own bits defined. They all look *very similar* and
resides in 16 bits of a GT register. As an example, RCS and BCS share
register 0. To compact the code a bit, at a slight expense to
complexity, this is exactly how the code works as well. 2 structures are
added to the ring buffer so that our ring buffer interrupt handling code
knows which ring shares the interrupt registers, and a shift value (ie.
the top or bottom 16 bits of the register).
The above allows us to kept the interrupt register caching scheme, the
per interrupt enables, and the code to mask and unmask interrupts
relatively clean (again at the cost of some more complexity).
Most of the GT units mentioned above are command streamers, and so the
symmetry should work quite well for even the yet to be implemented rings
which Broadwell adds.
v2: Fixes up a couple of bugs, and is more verbose about errors in the
Broadwell interrupt handler.
v3: fix DE_MISC IER offset
v4: Simplify interrupts:
I totally misread the docs the first time I implemented interrupts, and
so this should greatly simplify the mess. Unlike GEN6, we never touch
the regular mask registers in irq_get/put.
v5: Rebased on to of recent pch hotplug setup changes.
v6: Fixup on top of moving num_pipes to intel_info.
v7: Rebased on top of Egbert Eich's hpd irq handling rework. Also
wired up ibx_hpd_irq_setup for gen8.
v8: Rebase on top of Jani's asle handling rework.
v9: Rebase on top of Ben's VECS enabling for Haswell, where he
unfortunately went OCD on the gt irq #defines. Not that they're still
not yet fully consistent:
- Used the GT_RENDER_ #defines + bdw shifts.
- Dropped the shift from the L3_PARITY stuff, seemed clearer.
- s/irq_refcount/irq_refcount.gt/
v10: Squash in VECS enabling patches and the gen8_gt_irq_handler
refactoring from Zhao Yakui <yakui.zhao@intel.com>
v11: Rebase on top of the interrupt cleanups in upstream.
v12: Rebase on top of Ben's DPF changes in upstream.
v13: Drop bdw from the HAS_L3_DPF feature flag for now, it's unclear what
exactly needs to be done. Requested by Ben.
v14: Fix the patch.
- Drop the mask of reserved bits and assorted logic, it doesn't match
the spec.
- Do the posting read inconditionally instead of commenting it out.
- Add a GEN8_MASTER_IRQ_CONTROL definition and use it.
- Fix up the GEN8_PIPE interrupt defines and give the GEN8_ prefixes -
we actually will need to use them.
- Enclose macros in do {} while (0) (checkpatch).
- Clear DE_MISC interrupt bits only after having processed them.
- Fix whitespace fail (checkpatch).
- Fix overtly long lines where appropriate (checkpatch).
- Don't use typedef'ed private_t (maintainer-scripts).
- Align the function parameter list correctly.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net> (v4)
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
bikeshed
2013-11-03 04:07:09 +00:00
|
|
|
union {
|
|
|
|
u32 irq_mask;
|
|
|
|
u32 de_irq_mask[I915_MAX_PIPES];
|
|
|
|
};
|
2014-02-10 16:42:49 +00:00
|
|
|
u32 pipestat_irq_mask[I915_MAX_PIPES];
|
2012-11-02 18:55:02 +00:00
|
|
|
|
2014-10-09 19:57:43 +00:00
|
|
|
bool preserve_bios_swizzle;
|
|
|
|
|
2012-11-02 18:55:02 +00:00
|
|
|
unsigned int fsb_freq, mem_freq, is_ddr3;
|
2016-05-13 20:41:27 +00:00
|
|
|
unsigned int skl_preferred_vco_freq;
|
2016-11-14 16:35:10 +00:00
|
|
|
|
2015-08-18 11:36:59 +00:00
|
|
|
unsigned int max_dotclk_freq;
|
2014-10-07 14:41:22 +00:00
|
|
|
unsigned int hpll_freq;
|
2015-09-24 20:29:18 +00:00
|
|
|
unsigned int czclk_freq;
|
2012-11-02 18:55:02 +00:00
|
|
|
|
2013-09-02 14:22:25 +00:00
|
|
|
/**
|
|
|
|
* wq - Driver workqueue for GEM.
|
|
|
|
*
|
|
|
|
* NOTE: Work items scheduled here are not allowed to grab any modeset
|
|
|
|
* locks, for otherwise the flushing done in the pageflip code will
|
|
|
|
* result in deadlocks.
|
|
|
|
*/
|
2012-11-02 18:55:02 +00:00
|
|
|
struct workqueue_struct *wq;
|
|
|
|
|
2021-09-28 22:57:51 +00:00
|
|
|
/* pm private clock gating functions */
|
2021-09-28 22:58:07 +00:00
|
|
|
const struct drm_i915_clock_gating_funcs *clock_gating_funcs;
|
2021-09-28 22:57:51 +00:00
|
|
|
|
2012-11-02 18:55:02 +00:00
|
|
|
/* PCH chipset type */
|
|
|
|
enum intel_pch pch_type;
|
2012-11-20 17:12:07 +00:00
|
|
|
unsigned short pch_id;
|
2012-11-02 18:55:02 +00:00
|
|
|
|
2022-08-24 13:15:59 +00:00
|
|
|
unsigned long gem_quirks;
|
2012-11-02 18:55:02 +00:00
|
|
|
|
2012-11-14 16:14:03 +00:00
|
|
|
struct i915_gem_mm mm;
|
2012-05-02 09:49:32 +00:00
|
|
|
|
2012-11-02 18:55:07 +00:00
|
|
|
struct intel_l3_parity l3_parity;
|
|
|
|
|
2019-03-28 17:45:32 +00:00
|
|
|
/*
|
|
|
|
* edram size in MB.
|
|
|
|
* Cannot be determined by PCIID. You must always read a register.
|
|
|
|
*/
|
|
|
|
u32 edram_size_mb;
|
2013-07-04 18:02:05 +00:00
|
|
|
|
2012-11-14 16:14:04 +00:00
|
|
|
struct i915_gpu_error gpu_error;
|
2010-10-01 13:57:56 +00:00
|
|
|
|
2014-03-10 09:01:44 +00:00
|
|
|
u32 suspend_count;
|
2012-11-02 18:55:02 +00:00
|
|
|
struct i915_suspend_saved_registers regfile;
|
2019-08-20 02:01:46 +00:00
|
|
|
struct vlv_s0ix_state *vlv_s0ix_state;
|
2012-11-02 18:55:05 +00:00
|
|
|
|
2018-08-24 09:32:21 +00:00
|
|
|
struct dram_info {
|
2021-01-28 16:43:12 +00:00
|
|
|
bool wm_lv_0_adjust_needed;
|
2018-08-24 09:32:21 +00:00
|
|
|
u8 num_channels;
|
2018-08-24 09:32:25 +00:00
|
|
|
bool symmetric_memory;
|
2019-03-06 20:35:51 +00:00
|
|
|
enum intel_dram_type {
|
|
|
|
INTEL_DRAM_UNKNOWN,
|
|
|
|
INTEL_DRAM_DDR3,
|
|
|
|
INTEL_DRAM_DDR4,
|
|
|
|
INTEL_DRAM_LPDDR3,
|
2021-02-04 20:04:58 +00:00
|
|
|
INTEL_DRAM_LPDDR4,
|
|
|
|
INTEL_DRAM_DDR5,
|
|
|
|
INTEL_DRAM_LPDDR5,
|
2019-03-06 20:35:51 +00:00
|
|
|
} type;
|
2021-01-28 16:43:11 +00:00
|
|
|
u8 num_qgv_points;
|
drm/i915: Implement PSF GV point support
PSF GV points are an additional factor that can limit the
bandwidth available to display, separate from the traditional
QGV points. Whereas traditional QGV points represent possible
memory clock frequencies, PSF GV points reflect possible
frequencies of the memory fabric.
Switching between PSF GV points has the advantage of incurring
almost no memory access block time and thus does not need to be
accounted for in watermark calculations.
This patch adds support for those on top of regular QGV points.
Those are supposed to be used simultaneously, i.e we are always
at some QGV and some PSF GV point, based on the current video
mode requirements.
Bspec: 64631, 53998
v2: Seems that initial assumption made during ml conversation
was wrong, PCode rejects any masks containing points beyond
the ones returned, so even though BSpec says we have around
8 points theoretically, we can mask/unmask only those which
are returned, trying to manipulate those beyond causes a
failure from PCode. So switched back to generating mask
from 1 - num_qgv_points, where num_qgv_points is the actual
amount of points, advertised by PCode.
v3: - Extended restricted qgv point mask to 0xf, as we have now
3:2 bits for PSF GV points(Matt Roper)
- Replaced val2 with NULL from PCode request, since its not being
used(Matt Roper)
- Replaced %d to 0x%x for better readability(thanks for spotting)
Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
Cc: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210531064845.4389-2-stanislav.lisovskiy@intel.com
2021-05-31 06:48:45 +00:00
|
|
|
u8 num_psf_gv_points;
|
2018-08-24 09:32:21 +00:00
|
|
|
} dram_info;
|
|
|
|
|
2019-06-13 23:21:52 +00:00
|
|
|
struct intel_runtime_pm runtime_pm;
|
2013-12-06 22:32:13 +00:00
|
|
|
|
2019-10-07 21:09:41 +00:00
|
|
|
struct i915_perf perf;
|
drm/i915: Add i915 perf infrastructure
Adds base i915 perf infrastructure for Gen performance metrics.
This adds a DRM_IOCTL_I915_PERF_OPEN ioctl that takes an array of uint64
properties to configure a stream of metrics and returns a new fd usable
with standard VFS system calls including read() to read typed and sized
records; ioctl() to enable or disable capture and poll() to wait for
data.
A stream is opened something like:
uint64_t properties[] = {
/* Single context sampling */
DRM_I915_PERF_PROP_CTX_HANDLE, ctx_handle,
/* Include OA reports in samples */
DRM_I915_PERF_PROP_SAMPLE_OA, true,
/* OA unit configuration */
DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
DRM_I915_PERF_PROP_OA_FORMAT, report_format,
DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
};
struct drm_i915_perf_open_param parm = {
.flags = I915_PERF_FLAG_FD_CLOEXEC |
I915_PERF_FLAG_FD_NONBLOCK |
I915_PERF_FLAG_DISABLED,
.properties_ptr = (uint64_t)properties,
.num_properties = sizeof(properties) / 16,
};
int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, ¶m);
Records read all start with a common { type, size } header with
DRM_I915_PERF_RECORD_SAMPLE being of most interest. Sample records
contain an extensible number of fields and it's the
DRM_I915_PERF_PROP_SAMPLE_xyz properties given when opening that
determine what's included in every sample.
No specific streams are supported yet so any attempt to open a stream
will return an error.
v2:
use i915_gem_context_get() - Chris Wilson
v3:
update read() interface to avoid passing state struct - Chris Wilson
fix some rebase fallout, with i915-perf init/deinit
v4:
s/DRM_IORW/DRM_IOW/ - Emil Velikov
Signed-off-by: Robert Bragg <robert@sixbynine.org>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Sourab Gupta <sourab.gupta@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20161107194957.3385-2-robert@sixbynine.org
2016-11-07 19:49:47 +00:00
|
|
|
|
2022-10-13 15:45:20 +00:00
|
|
|
struct i915_hwmon *hwmon;
|
|
|
|
|
2014-07-24 16:04:21 +00:00
|
|
|
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
|
2021-12-14 19:33:40 +00:00
|
|
|
struct intel_gt gt0;
|
2019-04-24 20:07:14 +00:00
|
|
|
|
2022-03-18 23:39:33 +00:00
|
|
|
/*
|
|
|
|
* i915->gt[0] == &i915->gt0
|
|
|
|
*/
|
|
|
|
#define I915_MAX_GT 4
|
|
|
|
struct intel_gt *gt[I915_MAX_GT];
|
|
|
|
|
2022-03-18 23:39:34 +00:00
|
|
|
struct kobject *sysfs_gt;
|
|
|
|
|
2022-09-06 23:49:34 +00:00
|
|
|
/* Quick lookup of media GT (current platforms only have one) */
|
|
|
|
struct intel_gt *media_gt;
|
|
|
|
|
2019-04-24 20:07:14 +00:00
|
|
|
struct {
|
2019-10-04 13:40:09 +00:00
|
|
|
struct i915_gem_contexts {
|
|
|
|
spinlock_t lock; /* locks list */
|
|
|
|
struct list_head list;
|
|
|
|
} contexts;
|
2020-01-01 14:10:07 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We replace the local file with a global mappings as the
|
|
|
|
* backing storage for the mmap is on the device and not
|
|
|
|
* on the struct file, and we do not want to prolong the
|
|
|
|
* lifetime of the local fd. To minimise the number of
|
|
|
|
* anonymous inodes we create, we use a global singleton to
|
|
|
|
* share the global mapping.
|
|
|
|
*/
|
|
|
|
struct file *mmap_singleton;
|
2019-04-24 20:07:14 +00:00
|
|
|
} gem;
|
2014-07-24 16:04:21 +00:00
|
|
|
|
drm/i915/pxp: Promote pxp subsystem to top-level of i915
Starting with MTL, there will be two GT-tiles, a render and media
tile. PXP as a service for supporting workloads with protected
contexts and protected buffers can be subscribed by process
workloads on any tile. However, depending on the platform,
only one of the tiles is used for control events pertaining to PXP
operation (such as creating the arbitration session and session
tear-down).
PXP as a global feature is accessible via batch buffer instructions
on any engine/tile and the coherency across tiles is handled implicitly
by the HW. In fact, for the foreseeable future, we are expecting this
single-control-tile for the PXP subsystem.
In MTL, it's the standalone media tile (not the root tile) because
it contains the VDBOX and KCR engine (among the assets PXP relies on
for those events).
Looking at the current code design, each tile is represented by the
intel_gt structure while the intel_pxp structure currently hangs off the
intel_gt structure.
Keeping the intel_pxp structure within the intel_gt structure makes some
internal functionalities more straight forward but adds code complexity to
code readability and maintainibility to many external-to-pxp subsystems
which may need to pick the correct intel_gt structure. An example of this
would be the intel_pxp_is_active or intel_pxp_is_enabled functionality
which should be viewed as a global level inquiry, not a per-gt inquiry.
That said, this series promotes the intel_pxp structure into the
drm_i915_private structure making it a top-level subsystem and the PXP
subsystem will select the control gt internally and keep a pointer to
it for internal reference.
This promotion comes with two noteworthy changes:
1. Exported pxp functions that are called by external subsystems
(such as intel_pxp_enabled/active) will have to check implicitly
if i915->pxp is valid as that structure will not be allocated
for HW that doesn't support PXP.
2. Since GT is now considered a soft-dependency of PXP we are
ensuring that GT init happens before PXP init and vice versa
for fini. This causes a minor ordering change whereby we previously
called intel_pxp_suspend after intel_uc_suspend but now is before
i915_gem_suspend_late but the change is required for correct
dependency flows. Additionally, this re-order change doesn't
have any impact because at that point in either case, the top level
entry to i915 won't observe any PXP events (since the GPU was
quiesced during suspend_prepare). Also, any PXP event doesn't
really matter when we disable the PXP HW (global GT irqs are
already off anyway, so even if there was a bug that generated
spurious events we wouldn't see it and we would just clean it
up on resume which is okay since the default fallback action
for PXP would be to keep the sessions off at this suspend stage).
Changes from prior revs:
v11: - Reformat a comment (Tvrtko).
v10: - Change the code flow for intel_pxp_init to make it more
cleaner and readible with better comments explaining the
difference between full-PXP-feature vs the partial-teelink
inits depending on the platform. Additionally, only do
the pxp allocation when we are certain the subsystem is
needed. (Tvrtko).
v9: - Cosmetic cleanups in supported/enabled/active. (Daniele).
- Add comments for intel_pxp_init and pxp_get_ctrl_gt that
explain the functional flow for when PXP is not supported
but the backend-assets are needed for HuC authentication
(Daniele and Tvrtko).
- Fix two remaining functions that are accessible outside
PXP that need to be checking pxp ptrs before using them:
intel_pxp_irq_handler and intel_pxp_huc_load_and_auth
(Tvrtko and Daniele).
- User helper macro in pxp-debugfs (Tvrtko).
v8: - Remove pxp_to_gt macro (Daniele).
- Fix a bug in pxp_get_ctrl_gt for the case of MTL and we don't
support GSC-FW on it. (Daniele).
- Leave i915->pxp as NULL if we dont support PXP and in line
with that, do additional validity check on i915->pxp for
intel_pxp_is_supported/enabled/active (Daniele).
- Remove unncessary include header from intel_gt_debugfs.c
and check drm_minor i915->drm.primary (Daniele).
- Other cosmetics / minor issues / more comments on suspend
flow order change (Daniele).
v7: - Drop i915_dev_to_pxp and in intel_pxp_init use 'i915->pxp'
through out instead of local variable newpxp. (Rodrigo)
- In the case intel_pxp_fini is called during driver unload but
after i915 loading failed without pxp being allocated, check
i915->pxp before referencing it. (Alan)
v6: - Remove HAS_PXP macro and replace it with intel_pxp_is_supported
because : [1] introduction of 'ctrl_gt' means we correct this
for MTL's upcoming series now. [2] Also, this has little impact
globally as its only used by PXP-internal callers at the moment.
- Change intel_pxp_init/fini to take in i915 as its input to avoid
ptr-to-ptr in init/fini calls.(Jani).
- Remove the backpointer from pxp->i915 since we can use
pxp->ctrl_gt->i915 if we need it. (Rodrigo).
v5: - Switch from series to single patch (Rodrigo).
- change function name from pxp_get_kcr_owner_gt to
pxp_get_ctrl_gt.
- Fix CI BAT failure by removing redundant call to intel_pxp_fini
from driver-remove.
- NOTE: remaining open still persists on using ptr-to-ptr
and back-ptr.
v4: - Instead of maintaining intel_pxp as an intel_gt structure member
and creating a number of convoluted helpers that takes in i915 as
input and redirects to the correct intel_gt or takes any intel_gt
and internally replaces with the correct intel_gt, promote it to
be a top-level i915 structure.
v3: - Rename gt level helper functions to "intel_pxp_is_enabled/
supported/ active_on_gt" (Daniele)
- Upgrade _gt_supports_pxp to replace what was intel_gtpxp_is
supported as the new intel_pxp_is_supported_on_gt to check for
PXP feature support vs the tee support for huc authentication.
Fix pxp-debugfs-registration to use only the former to decide
support. (Daniele)
- Couple minor optimizations.
v2: - Avoid introduction of new device info or gt variables and use
existing checks / macros to differentiate the correct GT->PXP
control ownership (Daniele Ceraolo Spurio)
- Don't reuse the updated global-checkers for per-GT callers (such
as other files within PXP) to avoid unnecessary GT-reparsing,
expose a replacement helper like the prior ones. (Daniele).
v1: - Add one more patch to the series for the intel_pxp suspend/resume
for similar refactoring
References: https://patchwork.freedesktop.org/patch/msgid/20221202011407.4068371-1-alan.previn.teres.alexis@intel.com
Signed-off-by: Alan Previn <alan.previn.teres.alexis@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Acked-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221208180542.998148-1-alan.previn.teres.alexis@intel.com
2022-12-08 18:05:42 +00:00
|
|
|
struct intel_pxp *pxp;
|
|
|
|
|
2019-10-03 14:02:31 +00:00
|
|
|
/* For i915gm/i945gm vblank irq workaround */
|
|
|
|
u8 vblank_enabled;
|
2019-03-22 18:08:03 +00:00
|
|
|
|
2021-06-25 08:22:01 +00:00
|
|
|
bool irq_enabled;
|
|
|
|
|
drm/i915/pmu: Expose a PMU interface for perf queries
From: Chris Wilson <chris@chris-wilson.co.uk>
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
From: Dmitry Rogozhkin <dmitry.v.rogozhkin@intel.com>
The first goal is to be able to measure GPU (and invidual ring) busyness
without having to poll registers from userspace. (Which not only incurs
holding the forcewake lock indefinitely, perturbing the system, but also
runs the risk of hanging the machine.) As an alternative we can use the
perf event counter interface to sample the ring registers periodically
and send those results to userspace.
Functionality we are exporting to userspace is via the existing perf PMU
API and can be exercised via the existing tools. For example:
perf stat -a -e i915/rcs0-busy/ -I 1000
Will print the render engine busynnes once per second. All the performance
counters can be enumerated (perf list) and have their unit of measure
correctly reported in sysfs.
v1-v2 (Chris Wilson):
v2: Use a common timer for the ring sampling.
v3: (Tvrtko Ursulin)
* Decouple uAPI from i915 engine ids.
* Complete uAPI defines.
* Refactor some code to helpers for clarity.
* Skip sampling disabled engines.
* Expose counters in sysfs.
* Pass in fake regs to avoid null ptr deref in perf core.
* Convert to class/instance uAPI.
* Use shared driver code for rc6 residency, power and frequency.
v4: (Dmitry Rogozhkin)
* Register PMU with .task_ctx_nr=perf_invalid_context
* Expose cpumask for the PMU with the single CPU in the mask
* Properly support pmu->stop(): it should call pmu->read()
* Properly support pmu->del(): it should call stop(event, PERF_EF_UPDATE)
* Introduce refcounting of event subscriptions.
* Make pmu.busy_stats a refcounter to avoid busy stats going away
with some deleted event.
* Expose cpumask for i915 PMU to avoid multiple events creation of
the same type followed by counter aggregation by perf-stat.
* Track CPUs getting online/offline to migrate perf context. If (likely)
cpumask will initially set CPU0, CONFIG_BOOTPARAM_HOTPLUG_CPU0 will be
needed to see effect of CPU status tracking.
* End result is that only global events are supported and perf stat
works correctly.
* Deny perf driver level sampling - it is prohibited for uncore PMU.
v5: (Tvrtko Ursulin)
* Don't hardcode number of engine samplers.
* Rewrite event ref-counting for correctness and simplicity.
* Store initial counter value when starting already enabled events
to correctly report values to all listeners.
* Fix RC6 residency readout.
* Comments, GPL header.
v6:
* Add missing entry to v4 changelog.
* Fix accounting in CPU hotplug case by copying the approach from
arch/x86/events/intel/cstate.c. (Dmitry Rogozhkin)
v7:
* Log failure message only on failure.
* Remove CPU hotplug notification state on unregister.
v8:
* Fix error unwind on failed registration.
* Checkpatch cleanup.
v9:
* Drop the energy metric, it is available via intel_rapl_perf.
(Ville Syrjälä)
* Use HAS_RC6(p). (Chris Wilson)
* Handle unsupported non-engine events. (Dmitry Rogozhkin)
* Rebase for intel_rc6_residency_ns needing caller managed
runtime pm.
* Drop HAS_RC6 checks from the read callback since creating those
events will be rejected at init time already.
* Add counter units to sysfs so perf stat output is nicer.
* Cleanup the attribute tables for brevity and readability.
v10:
* Fixed queued accounting.
v11:
* Move intel_engine_lookup_user to intel_engine_cs.c
* Commit update. (Joonas Lahtinen)
v12:
* More accurate sampling. (Chris Wilson)
* Store and report frequency in MHz for better usability from
perf stat.
* Removed metrics: queued, interrupts, rc6 counters.
* Sample engine busyness based on seqno difference only
for less MMIO (and forcewake) on all platforms. (Chris Wilson)
v13:
* Comment spelling, use mul_u32_u32 to work around potential GCC
issue and somne code alignment changes. (Chris Wilson)
v14:
* Rebase.
v15:
* Rebase for RPS refactoring.
v16:
* Use the dynamic slot in the CPU hotplug state machine so that we are
free to setup our state as multi-instance. Previously we were re-using
the CPUHP_AP_PERF_X86_UNCORE_ONLINE slot which is neither used as
multi-instance, nor owned by our driver to start with.
* Register the CPU hotplug handlers after the PMU, otherwise the callback
will get called before the PMU is initialized which can end up in
perf_pmu_migrate_context with an un-initialized base.
* Added workaround for a probable bug in cpuhp core.
v17:
* Remove workaround for the cpuhp bug.
v18:
* Rebase for drm_i915_gem_engine_class getting upstream before us.
v19:
* Rebase. (trivial)
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Dmitry Rogozhkin <dmitry.v.rogozhkin@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Dmitry Rogozhkin <dmitry.v.rogozhkin@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171121181852.16128-2-tvrtko.ursulin@linux.intel.com
2017-11-21 18:18:45 +00:00
|
|
|
struct i915_pmu pmu;
|
|
|
|
|
2022-04-01 14:21:58 +00:00
|
|
|
struct i915_drm_clients clients;
|
|
|
|
|
2021-06-02 08:38:08 +00:00
|
|
|
/* The TTM device structure. */
|
|
|
|
struct ttm_device bdev;
|
|
|
|
|
2019-11-01 10:15:28 +00:00
|
|
|
I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)
|
|
|
|
|
2014-05-21 15:37:52 +00:00
|
|
|
/*
|
|
|
|
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
|
|
|
|
* will be rejected. Instead look for a better place.
|
|
|
|
*/
|
2014-03-31 11:27:22 +00:00
|
|
|
};
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2013-08-01 17:39:55 +00:00
|
|
|
/* Resolve the drm_device embedded in drm_i915_private back to the i915 device. */
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}
|
|
|
|
|
2016-08-22 10:32:42 +00:00
|
|
|
/*
 * Look up the i915 device from its base struct device; relies on the
 * driver core having stored the i915 pointer as drvdata at probe time.
 */
static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
	return dev_get_drvdata(kdev);
}
|
|
|
|
|
|
|
|
/*
 * Look up the i915 device from its PCI device; relies on the i915 pointer
 * having been stored as PCI drvdata at probe time.
 */
static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}
|
|
|
|
|
2021-12-14 19:33:32 +00:00
|
|
|
/* Return the primary (root) GT, which is embedded in the device as gt0. */
static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
{
	return &i915->gt0;
}
|
|
|
|
|
2016-03-24 11:20:38 +00:00
|
|
|
/* Simple iterator over all initialised engines */
|
drm/i915: Allocate intel_engine_cs structure only for the enabled engines
With the possibility of addition of many more number of rings in future,
the drm_i915_private structure could bloat as an array, of type
intel_engine_cs, is embedded inside it.
struct intel_engine_cs engine[I915_NUM_ENGINES];
Though this is still fine as generally there is only a single instance of
drm_i915_private structure used, but not all of the possible rings would be
enabled or active on most of the platforms. Some memory can be saved by
allocating intel_engine_cs structure only for the enabled/active engines.
Currently the engine/ring ID is kept static and dev_priv->engine[] is simply
indexed using the enums defined in intel_engine_id.
To save memory and continue using the static engine/ring IDs, 'engine' is
defined as an array of pointers.
struct intel_engine_cs *engine[I915_NUM_ENGINES];
dev_priv->engine[engine_ID] will be NULL for disabled engine instances.
There is a text size reduction of 928 bytes, from 1028200 to 1027272, for
i915.o file (but for i915.ko file text size remain same as 1193131 bytes).
v2:
- Remove the engine iterator field added in drm_i915_private structure,
instead pass a local iterator variable to the for_each_engine**
macros. (Chris)
- Do away with intel_engine_initialized() and instead directly use the
NULL pointer check on engine pointer. (Chris)
v3:
- Remove for_each_engine_id() macro, as the updated macro for_each_engine()
can be used in place of it. (Chris)
- Protect the access to Render engine Fault register with a NULL check, as
engine specific init is done later in Driver load sequence.
v4:
- Use !!dev_priv->engine[VCS] style for the engine check in getparam. (Chris)
- Kill the superfluous init_engine_lists().
v5:
- Cleanup the intel_engines_init() & intel_engines_setup(), with respect to
allocation of intel_engine_cs structure. (Chris)
v6:
- Rebase.
v7:
- Optimize the for_each_engine_masked() macro. (Chris)
- Change the type of 'iter' local variable to enum intel_engine_id. (Chris)
- Rebase.
v8: Rebase.
v9: Rebase.
v10:
- For index calculation use engine ID instead of pointer based arithmetic in
intel_engine_sync_index() as engine pointers are not contiguous now (Chris)
- For appropriateness, rename local enum variable 'iter' to 'id'. (Joonas)
- Use for_each_engine macro for cleanup in intel_engines_init() and remove
check for NULL engine pointer in cleanup() routines. (Joonas)
v11: Rebase.
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Akash Goel <akash.goel@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1476378888-7372-1-git-send-email-akash.goel@intel.com
2016-10-13 17:14:48 +00:00
|
|
|
/*
 * Simple iterator over all initialised engines: walks engine IDs
 * 0..I915_NUM_ENGINES-1, loading each engine[] slot into @engine__;
 * for_each_if() skips iterations where the slot evaluates false
 * (engine not present on this device).
 */
#define for_each_engine(engine__, dev_priv__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_NUM_ENGINES; \
	     (id__)++) \
		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
|
2016-03-23 18:19:53 +00:00
|
|
|
|
|
|
|
/* Iterator over subset of engines selected by mask */
|
2019-10-17 16:18:52 +00:00
|
|
|
/*
 * Iterator over the subset of a GT's engines selected by @mask__.
 * @tmp__ holds the remaining bits (restricted to engines that actually
 * exist via info.engine_mask); __mask_next_bit() consumes the lowest set
 * bit each iteration to index gt->engine[], terminating when no bits remain.
 */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
	for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
	     (tmp__) ? \
	     ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
	     0;)
|
2016-03-16 15:54:00 +00:00
|
|
|
|
2019-08-06 12:43:00 +00:00
|
|
|
/* Convert an rb_node in the uabi engine tree to its engine (NULL-safe). */
#define rb_to_uabi_engine(rb) \
	rb_entry_safe(rb, struct intel_engine_cs, uabi_node)

/* Walk every engine in the order given by the uabi_engines rbtree. */
#define for_each_uabi_engine(engine__, i915__) \
	for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
	     (engine__); \
	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
|
|
|
|
|
2020-06-04 12:36:41 +00:00
|
|
|
/*
 * Walk the uABI engines of one class only: start at instance 0 via
 * intel_engine_lookup_user() and stop as soon as the rbtree walk yields
 * an engine of a different class (or runs out of engines).
 */
#define for_each_uabi_class_engine(engine__, class__, i915__) \
	for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
	     (engine__) && (engine__)->uabi_class == (class__); \
	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
|
|
|
|
|
2018-12-31 14:56:46 +00:00
|
|
|
/* Accessors for the device's static info, runtime info and driver caps. */
#define INTEL_INFO(dev_priv)	(&(dev_priv)->__info)
#define RUNTIME_INFO(dev_priv)	(&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv)	(&(dev_priv)->caps)

/* PCI device ID, as recorded in runtime info. */
#define INTEL_DEVID(dev_priv)	(RUNTIME_INFO(dev_priv)->device_id)
|
2010-11-09 09:17:32 +00:00
|
|
|
|
2021-07-07 23:59:21 +00:00
|
|
|
/* Pack an IP version: major version in the high byte, release in the low. */
#define IP_VER(ver, rel)		((ver) << 8 | (rel))

#define GRAPHICS_VER(i915)		(RUNTIME_INFO(i915)->graphics.ip.ver)
#define GRAPHICS_VER_FULL(i915)		IP_VER(RUNTIME_INFO(i915)->graphics.ip.ver, \
					       RUNTIME_INFO(i915)->graphics.ip.rel)
/* Inclusive range check on the graphics IP major version. */
#define IS_GRAPHICS_VER(i915, from, until) \
	(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))

#define MEDIA_VER(i915)			(RUNTIME_INFO(i915)->media.ip.ver)
#define MEDIA_VER_FULL(i915)		IP_VER(RUNTIME_INFO(i915)->media.ip.ver, \
					       RUNTIME_INFO(i915)->media.ip.rel)
/* Inclusive range check on the media IP major version. */
#define IS_MEDIA_VER(i915, from, until) \
	(MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))
|
|
|
|
|
2022-09-02 22:10:54 +00:00
|
|
|
#define DISPLAY_VER(i915)	(RUNTIME_INFO(i915)->display.ip.ver)
/* Inclusive range check on the display IP major version. */
#define IS_DISPLAY_VER(i915, from, until) \
	(DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
|
|
|
|
|
2021-01-28 13:31:23 +00:00
|
|
|
/* PCI revision ID, read straight from the underlying pci_dev. */
#define INTEL_REVID(dev_priv)	(to_pci_dev((dev_priv)->drm.dev)->revision)

/* Per-IP stepping accessors (STEP_* values recorded in runtime info). */
#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)
#define INTEL_BASEDIE_STEP(__i915) (RUNTIME_INFO(__i915)->step.basedie_step)
|
2021-03-26 13:21:33 +00:00
|
|
|
|
|
|
|
/*
 * Stepping range checks. Note the range is half-open: [since, until).
 * Each check first WARNs if the stepping is unknown (STEP_NONE) for
 * this device, then evaluates the range comparison.
 */
#define IS_DISPLAY_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
	 INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))

#define IS_GRAPHICS_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \
	 INTEL_GRAPHICS_STEP(__i915) >= (since) && INTEL_GRAPHICS_STEP(__i915) < (until))

#define IS_MEDIA_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \
	 INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until))

#define IS_BASEDIE_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_BASEDIE_STEP(__i915) == STEP_NONE), \
	 INTEL_BASEDIE_STEP(__i915) >= (since) && INTEL_BASEDIE_STEP(__i915) < (until))
|
|
|
|
|
2019-03-27 14:23:28 +00:00
|
|
|
static __always_inline unsigned int
|
|
|
|
__platform_mask_index(const struct intel_runtime_info *info,
|
|
|
|
enum intel_platform p)
|
|
|
|
{
|
|
|
|
const unsigned int pbits =
|
|
|
|
BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
|
|
|
|
|
|
|
|
/* Expand the platform_mask array if this fails. */
|
|
|
|
BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
|
|
|
|
pbits * ARRAY_SIZE(info->platform_mask));
|
|
|
|
|
|
|
|
return p / pbits;
|
|
|
|
}
|
|
|
|
|
|
|
|
static __always_inline unsigned int
|
|
|
|
__platform_mask_bit(const struct intel_runtime_info *info,
|
|
|
|
enum intel_platform p)
|
|
|
|
{
|
|
|
|
const unsigned int pbits =
|
|
|
|
BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
|
|
|
|
|
|
|
|
return p % pbits + INTEL_SUBPLATFORM_BITS;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline u32
|
|
|
|
intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
|
|
|
|
{
|
|
|
|
const unsigned int pi = __platform_mask_index(info, p);
|
|
|
|
|
2021-01-21 16:19:36 +00:00
|
|
|
return info->platform_mask[pi] & INTEL_SUBPLATFORM_MASK;
|
2019-03-27 14:23:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Test whether @i915 is platform @p. @p must be a compile-time constant
 * (enforced below) so the word-index and bit-position computations fold
 * into constants.
 */
static __always_inline bool
IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);

	/* Reject non-constant platform arguments at build time. */
	BUILD_BUG_ON(!__builtin_constant_p(p));

	return info->platform_mask[pi] & BIT(pb);
}
|
|
|
|
|
|
|
|
/*
 * Test whether @i915 is both platform @p and subplatform @s. Both @p and
 * @s must be compile-time constants (enforced below).
 *
 * The platform bit and the subplatform bit live in the same platform_mask[]
 * word (see __platform_mask_index()/__platform_mask_bit()), so both can be
 * tested at once: shift each candidate bit up to the word's MSB and AND the
 * two shifted copies together.
 */
static __always_inline bool
IS_SUBPLATFORM(const struct drm_i915_private *i915,
	       enum intel_platform p, unsigned int s)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);
	const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
	const u32 mask = info->platform_mask[pi];

	BUILD_BUG_ON(!__builtin_constant_p(p));
	BUILD_BUG_ON(!__builtin_constant_p(s));
	/* Subplatform bits occupy only the low INTEL_SUBPLATFORM_BITS. */
	BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);

	/* Shift and test on the MSB position so sign flag can be used. */
	return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
}
|
2017-09-20 09:26:59 +00:00
|
|
|
|
2019-03-26 07:40:55 +00:00
|
|
|
#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
|
2019-10-24 19:51:19 +00:00
|
|
|
#define IS_DGFX(dev_priv) (INTEL_INFO(dev_priv)->is_dgfx)
|
2019-03-26 07:40:55 +00:00
|
|
|
|
2017-09-20 09:26:59 +00:00
|
|
|
#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
|
|
|
|
#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
|
|
|
|
#define IS_I85X(dev_priv) IS_PLATFORM(dev_priv, INTEL_I85X)
|
|
|
|
#define IS_I865G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I865G)
|
|
|
|
#define IS_I915G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915G)
|
|
|
|
#define IS_I915GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915GM)
|
|
|
|
#define IS_I945G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945G)
|
|
|
|
#define IS_I945GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945GM)
|
|
|
|
#define IS_I965G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965G)
|
|
|
|
#define IS_I965GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965GM)
|
|
|
|
#define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45)
|
|
|
|
#define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45)
|
2016-11-30 15:43:05 +00:00
|
|
|
#define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv))
|
2017-09-20 09:26:59 +00:00
|
|
|
#define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
|
|
|
|
#define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33)
|
2019-03-26 07:40:55 +00:00
|
|
|
#define IS_IRONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
|
|
|
|
#define IS_IRONLAKE_M(dev_priv) \
|
|
|
|
(IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
|
2021-03-20 04:42:40 +00:00
|
|
|
#define IS_SANDYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SANDYBRIDGE)
|
2017-09-20 09:26:59 +00:00
|
|
|
#define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
|
2017-08-30 16:12:07 +00:00
|
|
|
#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
|
2018-12-31 14:56:44 +00:00
|
|
|
INTEL_INFO(dev_priv)->gt == 1)
|
2017-09-20 09:26:59 +00:00
|
|
|
#define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
|
|
|
|
#define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
|
|
|
|
#define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL)
|
|
|
|
#define IS_BROADWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROADWELL)
|
|
|
|
#define IS_SKYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
|
|
|
|
#define IS_BROXTON(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROXTON)
|
|
|
|
#define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
|
|
|
|
#define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
|
|
|
|
#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
|
2020-06-02 14:05:40 +00:00
|
|
|
#define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
|
2018-01-11 18:00:04 +00:00
|
|
|
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
|
2020-10-13 19:29:48 +00:00
|
|
|
#define IS_JSL_EHL(dev_priv) (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \
|
|
|
|
IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
|
2019-07-11 17:30:56 +00:00
|
|
|
#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
|
2020-05-04 22:52:06 +00:00
|
|
|
#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
|
2020-07-13 18:23:17 +00:00
|
|
|
#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1)
|
2021-01-19 19:29:31 +00:00
|
|
|
#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S)
|
2021-05-06 16:19:23 +00:00
|
|
|
#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_P)
|
2021-07-21 22:30:27 +00:00
|
|
|
#define IS_XEHPSDV(dev_priv) IS_PLATFORM(dev_priv, INTEL_XEHPSDV)
|
2021-07-21 22:30:28 +00:00
|
|
|
#define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG2)
|
2022-05-02 16:34:07 +00:00
|
|
|
#define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, INTEL_PONTEVECCHIO)
|
2022-07-08 00:03:34 +00:00
|
|
|
#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_METEORLAKE)
|
2022-05-02 16:34:07 +00:00
|
|
|
|
2022-07-08 00:03:35 +00:00
|
|
|
#define IS_METEORLAKE_M(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_METEORLAKE, INTEL_SUBPLATFORM_M)
|
|
|
|
#define IS_METEORLAKE_P(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_METEORLAKE, INTEL_SUBPLATFORM_P)
|
2021-07-21 22:30:28 +00:00
|
|
|
#define IS_DG2_G10(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10)
|
|
|
|
#define IS_DG2_G11(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11)
|
2022-01-20 23:50:16 +00:00
|
|
|
#define IS_DG2_G12(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G12)
|
2021-12-03 06:35:43 +00:00
|
|
|
#define IS_ADLS_RPLS(dev_priv) \
|
2022-04-18 06:21:57 +00:00
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL)
|
2021-12-10 05:18:02 +00:00
|
|
|
#define IS_ADLP_N(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
|
2022-04-18 06:21:57 +00:00
|
|
|
#define IS_ADLP_RPLP(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
|
2016-10-13 10:02:58 +00:00
|
|
|
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
|
|
|
|
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
|
2019-03-27 14:23:28 +00:00
|
|
|
#define IS_BDW_ULT(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
|
|
|
|
#define IS_BDW_ULX(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
|
2016-10-13 10:02:58 +00:00
|
|
|
#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
|
2018-12-31 14:56:44 +00:00
|
|
|
INTEL_INFO(dev_priv)->gt == 3)
|
2019-03-27 14:23:28 +00:00
|
|
|
#define IS_HSW_ULT(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
|
2016-10-13 10:02:58 +00:00
|
|
|
#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
|
2018-12-31 14:56:44 +00:00
|
|
|
INTEL_INFO(dev_priv)->gt == 3)
|
2018-12-28 14:07:34 +00:00
|
|
|
#define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \
|
2018-12-31 14:56:44 +00:00
|
|
|
INTEL_INFO(dev_priv)->gt == 1)
|
2014-04-29 14:00:22 +00:00
|
|
|
/* ULX machines are also considered ULT. */
|
2019-03-27 14:23:28 +00:00
|
|
|
#define IS_HSW_ULX(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
|
|
|
|
#define IS_SKL_ULT(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
|
|
|
|
#define IS_SKL_ULX(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
|
|
|
|
#define IS_KBL_ULT(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
|
|
|
|
#define IS_KBL_ULX(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
|
2017-06-13 11:23:03 +00:00
|
|
|
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
|
2018-12-31 14:56:44 +00:00
|
|
|
INTEL_INFO(dev_priv)->gt == 2)
|
2016-10-13 10:02:58 +00:00
|
|
|
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
|
2018-12-31 14:56:44 +00:00
|
|
|
INTEL_INFO(dev_priv)->gt == 3)
|
2016-10-13 10:02:58 +00:00
|
|
|
#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
|
2018-12-31 14:56:44 +00:00
|
|
|
INTEL_INFO(dev_priv)->gt == 4)
|
2017-06-13 11:23:07 +00:00
|
|
|
#define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \
|
2018-12-31 14:56:44 +00:00
|
|
|
INTEL_INFO(dev_priv)->gt == 2)
|
2017-06-13 11:23:07 +00:00
|
|
|
#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
|
2018-12-31 14:56:44 +00:00
|
|
|
INTEL_INFO(dev_priv)->gt == 3)
|
2019-03-27 14:23:28 +00:00
|
|
|
#define IS_CFL_ULT(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
|
2019-06-05 16:29:46 +00:00
|
|
|
#define IS_CFL_ULX(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
|
2017-09-18 11:21:24 +00:00
|
|
|
#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
|
2018-12-31 14:56:44 +00:00
|
|
|
INTEL_INFO(dev_priv)->gt == 2)
|
2017-11-10 19:08:40 +00:00
|
|
|
#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
|
2018-12-31 14:56:44 +00:00
|
|
|
INTEL_INFO(dev_priv)->gt == 3)
|
2020-06-02 14:05:40 +00:00
|
|
|
|
|
|
|
#define IS_CML_ULT(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
|
|
|
|
#define IS_CML_ULX(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
|
|
|
|
#define IS_CML_GT2(dev_priv) (IS_COMETLAKE(dev_priv) && \
|
|
|
|
INTEL_INFO(dev_priv)->gt == 2)
|
|
|
|
|
2019-03-27 14:23:28 +00:00
|
|
|
#define IS_ICL_WITH_PORT_F(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
|
2015-09-12 04:47:50 +00:00
|
|
|
|
2022-02-22 14:14:24 +00:00
|
|
|
#define IS_TGL_UY(dev_priv) \
|
|
|
|
IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)
|
2020-08-07 19:26:28 +00:00
|
|
|
|
2021-10-20 00:23:53 +00:00
|
|
|
#define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until))
|
2015-10-20 12:22:02 +00:00
|
|
|
|
2021-10-20 00:23:53 +00:00
|
|
|
#define IS_KBL_GRAPHICS_STEP(dev_priv, since, until) \
|
|
|
|
(IS_KABYLAKE(dev_priv) && IS_GRAPHICS_STEP(dev_priv, since, until))
|
2021-03-26 13:21:34 +00:00
|
|
|
#define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \
|
|
|
|
(IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until))
|
2016-06-07 14:18:55 +00:00
|
|
|
|
2021-10-20 00:23:53 +00:00
|
|
|
#define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \
|
|
|
|
(IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until))
|
2021-07-13 19:36:31 +00:00
|
|
|
#define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \
|
|
|
|
(IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until))
|
2020-05-12 18:00:50 +00:00
|
|
|
|
2021-03-26 13:21:36 +00:00
|
|
|
#define IS_TGL_DISPLAY_STEP(__i915, since, until) \
|
2021-03-26 13:21:35 +00:00
|
|
|
(IS_TIGERLAKE(__i915) && \
|
|
|
|
IS_DISPLAY_STEP(__i915, since, until))
|
2020-08-27 23:39:43 +00:00
|
|
|
|
2021-10-20 00:23:53 +00:00
|
|
|
#define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \
|
2022-02-22 14:14:24 +00:00
|
|
|
(IS_TGL_UY(__i915) && \
|
2021-10-20 00:23:53 +00:00
|
|
|
IS_GRAPHICS_STEP(__i915, since, until))
|
2020-08-27 23:39:43 +00:00
|
|
|
|
2021-10-20 00:23:53 +00:00
|
|
|
/*
 * Graphics-stepping check for Tigerlake parts that are NOT the UY
 * subplatform: true when @__i915 is a non-UY TGL and its graphics stepping
 * lies in [since, until).
 *
 * Fix: the previous expansion had an unbalanced closing parenthesis
 * ("(A && !B) && C)"), which cannot compile; the whole expression is now
 * wrapped and balanced.
 */
#define IS_TGL_GRAPHICS_STEP(__i915, since, until) \
	(IS_TIGERLAKE(__i915) && !IS_TGL_UY(__i915) && \
	 IS_GRAPHICS_STEP(__i915, since, until))
|
2019-10-15 15:44:39 +00:00
|
|
|
|
2021-07-13 19:36:32 +00:00
|
|
|
#define IS_RKL_DISPLAY_STEP(p, since, until) \
|
|
|
|
(IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))
|
2020-07-13 18:23:17 +00:00
|
|
|
|
2021-10-20 00:23:53 +00:00
|
|
|
#define IS_DG1_GRAPHICS_STEP(p, since, until) \
|
|
|
|
(IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until))
|
2021-07-13 19:36:33 +00:00
|
|
|
#define IS_DG1_DISPLAY_STEP(p, since, until) \
|
|
|
|
(IS_DG1(p) && IS_DISPLAY_STEP(p, since, until))
|
2020-07-13 18:23:17 +00:00
|
|
|
|
2021-03-26 13:21:36 +00:00
|
|
|
#define IS_ADLS_DISPLAY_STEP(__i915, since, until) \
|
2021-03-26 13:21:35 +00:00
|
|
|
(IS_ALDERLAKE_S(__i915) && \
|
|
|
|
IS_DISPLAY_STEP(__i915, since, until))
|
2021-01-19 19:29:31 +00:00
|
|
|
|
2021-10-20 00:23:53 +00:00
|
|
|
#define IS_ADLS_GRAPHICS_STEP(__i915, since, until) \
|
2021-03-26 13:21:35 +00:00
|
|
|
(IS_ALDERLAKE_S(__i915) && \
|
2021-10-20 00:23:53 +00:00
|
|
|
IS_GRAPHICS_STEP(__i915, since, until))
|
2021-01-19 19:29:31 +00:00
|
|
|
|
2021-05-14 15:37:08 +00:00
|
|
|
#define IS_ADLP_DISPLAY_STEP(__i915, since, until) \
|
|
|
|
(IS_ALDERLAKE_P(__i915) && \
|
|
|
|
IS_DISPLAY_STEP(__i915, since, until))
|
|
|
|
|
2021-10-20 00:23:53 +00:00
|
|
|
#define IS_ADLP_GRAPHICS_STEP(__i915, since, until) \
|
2021-05-14 15:37:08 +00:00
|
|
|
(IS_ALDERLAKE_P(__i915) && \
|
2021-10-20 00:23:53 +00:00
|
|
|
IS_GRAPHICS_STEP(__i915, since, until))
|
2021-05-14 15:37:08 +00:00
|
|
|
|
2021-10-20 00:23:53 +00:00
|
|
|
#define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \
|
|
|
|
(IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))
|
2021-07-21 22:30:27 +00:00
|
|
|
|
2022-11-03 18:45:59 +00:00
|
|
|
#define IS_MTL_GRAPHICS_STEP(__i915, variant, since, until) \
|
|
|
|
(IS_SUBPLATFORM(__i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_##variant) && \
|
|
|
|
IS_GRAPHICS_STEP(__i915, since, until))
|
|
|
|
|
2022-12-09 22:05:43 +00:00
|
|
|
#define IS_MTL_DISPLAY_STEP(__i915, since, until) \
|
|
|
|
(IS_METEORLAKE(__i915) && \
|
|
|
|
IS_DISPLAY_STEP(__i915, since, until))
|
|
|
|
|
2021-07-21 22:30:28 +00:00
|
|
|
/*
|
2022-01-20 23:50:16 +00:00
|
|
|
* DG2 hardware steppings are a bit unusual. The hardware design was forked to
|
|
|
|
* create three variants (G10, G11, and G12) which each have distinct
|
|
|
|
* workaround sets. The G11 and G12 forks of the DG2 design reset the GT
|
|
|
|
* stepping back to "A0" for their first iterations, even though they're more
|
|
|
|
* similar to a G10 B0 stepping and G10 C0 stepping respectively in terms of
|
|
|
|
* functionality and workarounds. However the display stepping does not reset
|
|
|
|
* in the same manner --- a specific stepping like "B0" has a consistent
|
|
|
|
* meaning regardless of whether it belongs to a G10, G11, or G12 DG2.
|
2021-07-21 22:30:28 +00:00
|
|
|
*
|
|
|
|
* TLDR: All GT workarounds and stepping-specific logic must be applied in
|
2022-01-20 23:50:16 +00:00
|
|
|
* relation to a specific subplatform (G10/G11/G12), whereas display workarounds
|
2021-07-21 22:30:28 +00:00
|
|
|
* and stepping-specific logic will be applied with a general DG2-wide stepping
|
|
|
|
* number.
|
|
|
|
*/
|
2021-10-20 00:23:53 +00:00
|
|
|
#define IS_DG2_GRAPHICS_STEP(__i915, variant, since, until) \
|
2021-07-21 22:30:28 +00:00
|
|
|
(IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \
|
2021-10-20 00:23:53 +00:00
|
|
|
IS_GRAPHICS_STEP(__i915, since, until))
|
2021-07-21 22:30:28 +00:00
|
|
|
|
2021-11-16 17:48:14 +00:00
|
|
|
#define IS_DG2_DISPLAY_STEP(__i915, since, until) \
|
2021-07-21 22:30:28 +00:00
|
|
|
(IS_DG2(__i915) && \
|
|
|
|
IS_DISPLAY_STEP(__i915, since, until))
|
|
|
|
|
2022-05-27 16:33:47 +00:00
|
|
|
#define IS_PVC_BD_STEP(__i915, since, until) \
|
|
|
|
(IS_PONTEVECCHIO(__i915) && \
|
|
|
|
IS_BASEDIE_STEP(__i915, since, until))
|
|
|
|
|
|
|
|
#define IS_PVC_CT_STEP(__i915, since, until) \
|
|
|
|
(IS_PONTEVECCHIO(__i915) && \
|
|
|
|
IS_GRAPHICS_STEP(__i915, since, until))
|
|
|
|
|
2021-06-06 04:50:50 +00:00
|
|
|
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
|
|
|
|
#define IS_GEN9_LP(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && IS_LP(dev_priv))
|
|
|
|
#define IS_GEN9_BC(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && !IS_LP(dev_priv))
|
2016-11-10 15:23:09 +00:00
|
|
|
|
2020-07-08 00:39:45 +00:00
|
|
|
#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
|
2020-07-08 00:39:47 +00:00
|
|
|
#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
|
2016-06-23 13:52:41 +00:00
|
|
|
|
2022-11-08 02:05:54 +00:00
|
|
|
/*
 * Extract @count engine-instance bits starting at bit @first from @mask,
 * shifted down so bit 0 of the result corresponds to instance @first.
 * A GNU statement expression is used so @first and @count are evaluated
 * exactly once.
 */
#define __ENGINE_INSTANCES_MASK(mask, first, count) ({ \
	unsigned int first__ = (first); \
	unsigned int count__ = (count); \
	((mask) & GENMASK(first__ + count__ - 1, first__)) >> first__; \
})
|
2022-11-08 02:05:54 +00:00
|
|
|
|
|
|
|
#define ENGINE_INSTANCES_MASK(gt, first, count) \
|
|
|
|
__ENGINE_INSTANCES_MASK((gt)->info.engine_mask, first, count)
|
|
|
|
|
2022-03-03 22:34:34 +00:00
|
|
|
#define RCS_MASK(gt) \
|
|
|
|
ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS)
|
2022-05-11 06:02:27 +00:00
|
|
|
#define BCS_MASK(gt) \
|
|
|
|
ENGINE_INSTANCES_MASK(gt, BCS0, I915_MAX_BCS)
|
2020-07-08 00:39:45 +00:00
|
|
|
#define VDBOX_MASK(gt) \
|
|
|
|
ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
|
|
|
|
#define VEBOX_MASK(gt) \
|
|
|
|
ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
|
2022-03-01 23:15:39 +00:00
|
|
|
#define CCS_MASK(gt) \
|
|
|
|
ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)
|
2019-03-22 00:24:31 +00:00
|
|
|
|
2022-05-19 08:57:30 +00:00
|
|
|
#define HAS_MEDIA_RATIO_MODE(dev_priv) (INTEL_INFO(dev_priv)->has_media_ratio_mode)
|
|
|
|
|
2018-05-22 20:59:06 +00:00
|
|
|
/*
|
|
|
|
* The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
|
|
|
|
* All later gens can run the final buffer from the ppgtt
|
|
|
|
*/
|
2021-06-06 04:50:50 +00:00
|
|
|
#define CMDPARSER_USES_GGTT(dev_priv) (GRAPHICS_VER(dev_priv) == 7)
|
2018-05-22 20:59:06 +00:00
|
|
|
|
2018-12-31 14:56:44 +00:00
|
|
|
#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
|
2022-01-18 11:55:44 +00:00
|
|
|
#define HAS_4TILE(dev_priv) (INTEL_INFO(dev_priv)->has_4tile)
|
2018-12-31 14:56:44 +00:00
|
|
|
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
|
2019-03-28 17:45:32 +00:00
|
|
|
#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
|
2021-06-06 04:50:50 +00:00
|
|
|
#define HAS_SECURE_BATCHES(dev_priv) (GRAPHICS_VER(dev_priv) < 6)
|
2020-10-15 12:21:37 +00:00
|
|
|
#define HAS_WT(dev_priv) HAS_EDRAM(dev_priv)
|
2010-11-09 09:17:32 +00:00
|
|
|
|
2018-12-31 14:56:44 +00:00
|
|
|
#define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical)
|
2012-02-09 16:15:46 +00:00
|
|
|
|
2016-11-04 14:42:44 +00:00
|
|
|
#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
|
2018-12-31 14:56:44 +00:00
|
|
|
(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
|
2018-03-02 16:14:59 +00:00
|
|
|
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
|
2018-12-31 14:56:44 +00:00
|
|
|
(INTEL_INFO(dev_priv)->has_logical_ring_elsq)
|
2017-11-20 20:55:00 +00:00
|
|
|
|
|
|
|
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
|
|
|
|
|
2022-08-19 12:02:40 +00:00
|
|
|
#define INTEL_PPGTT(dev_priv) (RUNTIME_INFO(dev_priv)->ppgtt_type)
|
2018-09-26 20:12:22 +00:00
|
|
|
#define HAS_PPGTT(dev_priv) \
|
|
|
|
(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
|
|
|
|
#define HAS_FULL_PPGTT(dev_priv) \
|
|
|
|
(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
|
|
|
|
|
2017-10-06 22:18:18 +00:00
|
|
|
#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
|
|
|
|
GEM_BUG_ON((sizes) == 0); \
|
2022-08-19 12:02:39 +00:00
|
|
|
((sizes) & ~RUNTIME_INFO(dev_priv)->page_sizes) == 0; \
|
2017-10-06 22:18:18 +00:00
|
|
|
})
|
2016-11-04 14:42:44 +00:00
|
|
|
|
2018-12-31 14:56:44 +00:00
|
|
|
#define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay)
|
2016-11-04 14:42:44 +00:00
|
|
|
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
|
2018-12-31 14:56:44 +00:00
|
|
|
(INTEL_INFO(dev_priv)->display.overlay_needs_physical)
|
2010-11-09 09:17:32 +00:00
|
|
|
|
2012-12-17 15:21:27 +00:00
|
|
|
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
|
2016-11-30 15:43:04 +00:00
|
|
|
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
|
2015-12-16 17:18:37 +00:00
|
|
|
|
2019-10-17 13:38:31 +00:00
|
|
|
#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \
|
2021-06-06 04:50:50 +00:00
|
|
|
(IS_BROADWELL(dev_priv) || GRAPHICS_VER(dev_priv) == 9)
|
2019-10-17 13:38:31 +00:00
|
|
|
|
2018-02-22 20:05:35 +00:00
|
|
|
/* WaRsDisableCoarsePowerGating:skl,cnl */
|
2019-12-31 12:27:08 +00:00
|
|
|
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
|
2021-07-28 21:59:46 +00:00
|
|
|
(IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
|
2016-04-05 12:56:16 +00:00
|
|
|
|
2022-01-24 19:31:35 +00:00
|
|
|
#define HAS_GMBUS_IRQ(dev_priv) (DISPLAY_VER(dev_priv) >= 4)
|
|
|
|
#define HAS_GMBUS_BURST_READ(dev_priv) (DISPLAY_VER(dev_priv) >= 11 || \
|
2018-06-28 13:34:49 +00:00
|
|
|
IS_GEMINILAKE(dev_priv) || \
|
|
|
|
IS_KABYLAKE(dev_priv))
|
2012-12-17 15:21:27 +00:00
|
|
|
|
2010-11-09 09:17:32 +00:00
|
|
|
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
|
|
|
|
* rows, which changed the alignment requirements and fence programming.
|
|
|
|
*/
|
2021-06-06 04:50:50 +00:00
|
|
|
#define HAS_128_BYTE_Y_TILING(dev_priv) (GRAPHICS_VER(dev_priv) != 2 && \
|
|
|
|
!(IS_I915G(dev_priv) || IS_I915GM(dev_priv)))
|
2018-12-31 14:56:44 +00:00
|
|
|
#define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv)
|
|
|
|
#define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug)
|
2010-11-09 09:17:32 +00:00
|
|
|
|
2022-01-24 19:31:35 +00:00
|
|
|
#define HAS_FW_BLC(dev_priv) (DISPLAY_VER(dev_priv) > 2)
|
2022-08-19 12:02:38 +00:00
|
|
|
#define HAS_FBC(dev_priv) (RUNTIME_INFO(dev_priv)->fbc_mask != 0)
|
2022-01-24 19:31:35 +00:00
|
|
|
#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7)
|
2010-11-09 09:17:32 +00:00
|
|
|
|
2016-10-13 10:02:58 +00:00
|
|
|
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
|
2013-06-24 17:29:34 +00:00
|
|
|
|
2018-12-31 14:56:44 +00:00
|
|
|
#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
|
2022-08-17 12:19:48 +00:00
|
|
|
#define HAS_DP20(dev_priv) (IS_DG2(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
|
2015-05-18 14:10:01 +00:00
|
|
|
|
2022-09-07 09:10:43 +00:00
|
|
|
#define HAS_DOUBLE_BUFFERED_M_N(dev_priv) (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
|
|
|
|
|
2021-07-07 23:42:06 +00:00
|
|
|
#define HAS_CDCLK_CRAWL(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_crawl)
|
2022-10-25 22:30:40 +00:00
|
|
|
#define HAS_CDCLK_SQUASH(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_squash)
|
2018-12-31 14:56:44 +00:00
|
|
|
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
|
2021-02-12 22:20:49 +00:00
|
|
|
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg)
|
2018-12-31 14:56:44 +00:00
|
|
|
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
|
2020-06-03 21:15:28 +00:00
|
|
|
#define HAS_PSR_HW_TRACKING(dev_priv) \
|
|
|
|
(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
|
2022-01-24 19:31:35 +00:00
|
|
|
#define HAS_PSR2_SEL_FETCH(dev_priv) (DISPLAY_VER(dev_priv) >= 12)
|
2022-08-19 12:02:44 +00:00
|
|
|
#define HAS_TRANSCODER(dev_priv, trans) ((RUNTIME_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
|
2017-12-01 11:30:30 +00:00
|
|
|
|
2018-12-31 14:56:44 +00:00
|
|
|
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
|
|
|
|
#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
|
2017-12-01 11:30:30 +00:00
|
|
|
#define HAS_RC6pp(dev_priv) (false) /* HW was never validated */
|
2012-11-23 17:30:39 +00:00
|
|
|
|
2019-04-19 13:48:36 +00:00
|
|
|
#define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)
|
|
|
|
|
2022-08-19 12:02:46 +00:00
|
|
|
#define HAS_DMC(dev_priv) (RUNTIME_INFO(dev_priv)->has_dmc)
|
2023-01-18 13:15:35 +00:00
|
|
|
#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
|
|
|
|
#define HAS_DSC(__i915) (RUNTIME_INFO(__i915)->has_dsc)
|
|
|
|
#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
|
drm/i915/skl: Add support to load SKL CSR firmware.
Display Context Save and Restore support is needed for
various SKL Display C states like DC5, DC6.
This implementation is added based on first version of DMC CSR program
that we received from h/w team.
Here we are using request_firmware based design.
Finally this firmware should end up in linux-firmware tree.
For SKL platform its mandatory to ensure that we load this
csr program before enabling DC states like DC5/DC6.
As CSR program gets reset on various conditions, we should ensure
to load it during boot and in future change to be added to load
this system resume sequence too.
v1: Initial relese as RFC patch
v2: Design change as per Daniel, Damien and Shobit's review comments
request firmware method followed.
v3: Some optimization and functional changes.
Pulled register defines into drivers/gpu/drm/i915/i915_reg.h
Used kmemdup to allocate and duplicate firmware content.
Ensured to free allocated buffer.
v4: Modified as per review comments from Satheesh and Daniel
Removed temporary buffer.
Optimized number of writes by replacing I915_WRITE with I915_WRITE64.
v5:
Modified as per review comemnts from Damien.
- Changed name for functions and firmware.
- Introduced HAS_CSR.
- Reverted back previous change and used csr_buf with u8 size.
- Using cpu_to_be64 for endianness change.
Modified as per review comments from Imre.
- Modified registers and macro names to be a bit closer to bspec terminology
and the existing register naming in the driver.
- Early return for non SKL platforms in intel_load_csr_program function.
- Added locking around CSR program load function as it may be called
concurrently during system/runtime resume.
- Releasing the fw before loading the program for consistency
- Handled error path during f/w load.
v6: Modified as per review comments from Imre.
- Corrected out_freecsr sequence.
v7: Modified as per review comments from Imre.
Fail loading fw if fw->size%8!=0.
v8: Rebase to latest.
v9: Rebase on top of -nightly (Damien)
v10: Enabled support for dmc firmware ver 1.0.
According to ver 1.0 in a single binary package all the firmware's that are
required for different stepping's of the product will be stored. The package
contains the css header, followed by the package header and the actual dmc
firmwares. Package header contains the firmware/stepping mapping table and
the corresponding firmware offsets to the individual binaries, within the
package. Each individual program binary contains the header and the payload
sections whose size is specified in the header section. This changes are done
to extract the specific firmaware from the package. (Animesh)
v11: Modified as per review comemnts from Imre.
- Added code comment from bpec for header structure elements.
- Added __packed to avoid structure padding.
- Added helper functions for stepping and substepping info.
- Added code comment for CSR_MAX_FW_SIZE.
- Disabled BXT firmware loading, will be enabled with dmc 1.0 support.
- Changed skl_stepping_info based on bspec, earlier used from config DB.
- Removed duplicate call of cpu_to_be* from intel_csr_load_program function.
- Used cpu_to_be32 instead of cpu_to_be64 as firmware binary in dword aligned.
- Added sanity check for header length.
- Added sanity check for mmio address got from firmware binary.
- kmalloc done separately for dmc header and dmc firmware. (Animesh)
v12: Modified as per review comemnts from Imre.
- Corrected the typo error in skl stepping info structure.
- Added out-of-bound access for skl_stepping_info.
- Sanity check for mmio address modified.
- Sanity check added for stepping and substeppig.
- Modified the intel_dmc_info structure, cache only the required header info. (Animesh)
v13: clarify firmware load error message.
The reason for a firmware loading failure can be obscure if the driver
is built-in. Provide an explanation to the user about the likely reason for
the failure and how to resolve it. (Imre)
v14: Suggested by Jani.
- fix s/I915/CONFIG_DRM_I915/ typo
- add fw_path to the firmware object instead of using a static ptr (Jani)
v15:
1) Changed the firmware name as dmc_gen9.bin, everytime for a new firmware version a symbolic link
with same name will help not to build kernel again.
2) Changes done as per review comments from Imre.
- Error check removed for intel_csr_ucode_init.
- Moved csr-specific data structure to intel_csr.h and optimization done on structure definition.
- fw->data used directly for parsing the header info & memory allocation
only done separately for payload. (Animesh)
v16:
- No need for out_regs label in i915_driver_load(), so removed it.
- Changed the firmware name as skl_dmc_ver1.bin, followed naming convention <platform>_dmc_<api-version>.bin (Animesh)
Issue: VIZ-2569
Signed-off-by: A.Sunil Kamath <sunil.kamath@intel.com>
Signed-off-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Animesh Manna <animesh.manna@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2015-05-04 12:58:44 +00:00
|
|
|
|
2022-04-19 19:33:08 +00:00
|
|
|
#define HAS_HECI_PXP(dev_priv) \
|
|
|
|
(INTEL_INFO(dev_priv)->has_heci_pxp)
|
|
|
|
|
|
|
|
#define HAS_HECI_GSCFI(dev_priv) \
|
|
|
|
(INTEL_INFO(dev_priv)->has_heci_gscfi)
|
|
|
|
|
|
|
|
#define HAS_HECI_GSC(dev_priv) (HAS_HECI_PXP(dev_priv) || HAS_HECI_GSCFI(dev_priv))
|
|
|
|
|
2022-01-24 19:31:35 +00:00
|
|
|
#define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12)
|
2021-03-02 11:02:59 +00:00
|
|
|
|
2018-12-31 14:56:44 +00:00
|
|
|
#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
|
|
|
|
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
|
2016-11-03 08:39:46 +00:00
|
|
|
|
2022-10-26 22:20:51 +00:00
|
|
|
#define HAS_OA_BPC_REPORTING(dev_priv) \
|
|
|
|
(INTEL_INFO(dev_priv)->has_oa_bpc_reporting)
|
2022-10-26 22:21:01 +00:00
|
|
|
#define HAS_OA_SLICE_CONTRIB_LIMITS(dev_priv) \
|
|
|
|
(INTEL_INFO(dev_priv)->has_oa_slice_contrib_limits)
|
2022-10-26 22:20:51 +00:00
|
|
|
|
2021-12-08 14:16:10 +00:00
|
|
|
/*
|
|
|
|
* Set this flag, when platform requires 64K GTT page sizes or larger for
|
2022-02-18 18:47:42 +00:00
|
|
|
* device local memory access.
|
2021-12-08 14:16:10 +00:00
|
|
|
*/
|
|
|
|
#define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages)
|
|
|
|
|
2018-12-31 14:56:44 +00:00
|
|
|
#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
|
2017-08-17 13:45:27 +00:00
|
|
|
|
2022-08-19 12:02:42 +00:00
|
|
|
#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i))
|
2019-10-25 15:37:22 +00:00
|
|
|
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
|
2019-10-18 09:07:49 +00:00
|
|
|
|
2022-09-06 23:49:25 +00:00
|
|
|
#define HAS_EXTRA_GT_LIST(dev_priv) (INTEL_INFO(dev_priv)->extra_gt_list)
|
|
|
|
|
2022-02-18 18:47:50 +00:00
|
|
|
/*
|
|
|
|
* Platform has the dedicated compression control state for each lmem surfaces
|
|
|
|
* stored in lmem to support the 3D and media compression formats.
|
|
|
|
*/
|
|
|
|
#define HAS_FLAT_CCS(dev_priv) (INTEL_INFO(dev_priv)->has_flat_ccs)
|
|
|
|
|
2019-07-25 00:18:06 +00:00
|
|
|
#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
|
2015-08-12 14:43:36 +00:00
|
|
|
|
2022-08-19 12:02:41 +00:00
|
|
|
#define HAS_POOLED_EU(dev_priv) (RUNTIME_INFO(dev_priv)->has_pooled_eu)
|
2016-06-03 05:34:33 +00:00
|
|
|
|
2019-07-30 18:04:06 +00:00
|
|
|
#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
|
|
|
|
|
2019-02-04 22:25:38 +00:00
|
|
|
#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
|
2014-07-21 09:53:38 +00:00
|
|
|
|
2022-09-16 01:46:46 +00:00
|
|
|
#define HAS_GMD_ID(i915) (INTEL_INFO(i915)->has_gmd_id)
|
|
|
|
|
2022-01-24 19:31:35 +00:00
|
|
|
/* LSPCON adaptors are only handled on display version 9 and 10 platforms */
#define HAS_LSPCON(dev_priv) (IS_DISPLAY_VER(dev_priv, 9, 10))
|
2016-10-14 14:26:50 +00:00
|
|
|
|
2022-05-05 21:38:03 +00:00
|
|
|
/* L3 CCS read support (has_l3_ccs_read device info flag) */
#define HAS_L3_CCS_READ(i915) (INTEL_INFO(i915)->has_l3_ccs_read)
|
|
|
|
|
2013-09-19 18:01:40 +00:00
|
|
|
/* DPF == dynamic parity feature */
|
2018-12-31 14:56:44 +00:00
|
|
|
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
|
2016-10-13 10:02:58 +00:00
|
|
|
/* HSW GT3 has two L3 slices; otherwise 1 or 0 per the DPF flag */
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
|
|
|
|
2 : HAS_L3_DPF(dev_priv))
|
2012-07-25 03:47:31 +00:00
|
|
|
|
2022-08-19 12:02:44 +00:00
|
|
|
/* Number of display pipes == number of set bits in the runtime pipe mask */
#define INTEL_NUM_PIPES(dev_priv) (hweight8(RUNTIME_INFO(dev_priv)->pipe_mask))
|
2019-09-11 09:26:08 +00:00
|
|
|
|
2022-08-19 12:02:44 +00:00
|
|
|
/* True when at least one display pipe exists */
#define HAS_DISPLAY(dev_priv) (RUNTIME_INFO(dev_priv)->pipe_mask != 0)
|
2018-11-30 23:20:47 +00:00
|
|
|
|
2022-01-24 19:31:35 +00:00
|
|
|
/* Variable Refresh Rate is available from display version 11 onwards */
#define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11)
|
2021-01-25 20:08:18 +00:00
|
|
|
|
2021-08-23 12:25:32 +00:00
|
|
|
/* Asynchronous page flips are supported from display version 5 onwards */
#define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5)
|
|
|
|
|
2019-09-13 10:04:07 +00:00
|
|
|
/* Only valid when HAS_DISPLAY() is true */
|
2020-05-04 18:15:57 +00:00
|
|
|
/*
 * Warns when evaluated on a platform with no display pipes; otherwise true
 * unless display was disabled via module parameter or the OpRegion marks
 * the SKU as headless.
 */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
|
2022-06-10 08:54:29 +00:00
|
|
|
(drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), \
|
|
|
|
!(dev_priv)->params.disable_display && \
|
|
|
|
!intel_opregion_headless_sku(dev_priv))
|
2019-09-13 10:04:07 +00:00
|
|
|
|
2022-01-20 21:29:47 +00:00
|
|
|
/* GuC deprivilege support (has_guc_deprivilege device info flag) */
#define HAS_GUC_DEPRIVILEGE(dev_priv) \
|
|
|
|
(INTEL_INFO(dev_priv)->has_guc_deprivilege)
|
|
|
|
|
2021-01-29 18:29:41 +00:00
|
|
|
/* Display 12 plane minimization applies only on RKL and ADL-S */
#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
|
|
|
|
IS_ALDERLAKE_S(dev_priv))
|
|
|
|
|
2022-08-18 23:42:00 +00:00
|
|
|
/* MBUS joining is available on ADL-P and display version 14+ */
#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
|
2017-05-24 15:54:11 +00:00
|
|
|
|
2022-05-11 06:02:26 +00:00
|
|
|
/* Device has a 3D pipeline (has_3d_pipeline device info flag) */
#define HAS_3D_PIPELINE(i915)	(INTEL_INFO(i915)->has_3d_pipeline)
|
|
|
|
|
2022-06-01 15:07:25 +00:00
|
|
|
/* Each EU fuse bit represents a single EU (has_one_eu_per_fuse_bit) */
#define HAS_ONE_EU_PER_FUSE_BIT(i915)	(INTEL_INFO(i915)->has_one_eu_per_fuse_bit)
|
|
|
|
|
2022-10-05 15:41:59 +00:00
|
|
|
/*
 * True on LMEM-less parts with graphics IP 12.70+; presumably stolen system
 * memory is then reached through the LMEM BAR -- confirm against users.
 */
#define HAS_LMEMBAR_SMEM_STOLEN(i915) (!HAS_LMEM(i915) && \
|
|
|
|
GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
|
2022-09-29 11:46:58 +00:00
|
|
|
|
2016-07-05 09:40:20 +00:00
|
|
|
/* intel_device_info.c */
|
|
|
|
/*
 * Return a mutable pointer to the device info by casting away the const
 * from INTEL_INFO().  NOTE(review): presumably only intended for early
 * driver initialization, before the info is treated as read-only by the
 * rest of the driver -- confirm against callers.
 */
static inline struct intel_device_info *
|
|
|
|
mkwrite_device_info(struct drm_i915_private *dev_priv)
|
|
|
|
{
|
2018-12-31 14:56:44 +00:00
|
|
|
return (struct intel_device_info *)INTEL_INFO(dev_priv);
|
2016-07-05 09:40:20 +00:00
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif
|