Merge tag 'drm-intel-next-2019-03-28' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

UAPI Changes:
- Make mmap code more asynchronous. Avoid the full SET_DOMAIN on GTT mmap pagefault
  and flush pages on acquisition instead. This moves some of the work from mmap fault
  time to execbuf time to avoid lock contention during mmap access.

  Has a neutral to positive impact on perf in real-world workloads on the currently
  known userspaces, since the flushing moves to execbuf time due to recycling of BOs.

  If there exists an unknown non-recycling userspace, it should explicitly do the
  SET_DOMAIN and not rely on the kernel doing an implicit SET_DOMAIN, because a
  swapout/in might have happened (see the userspace sketch after this list).

- Restore the accidentally removed behaviour of returning the object size on GEM_CREATE
  (from 2011: ff72145bad ("drm: dumb scanout create/mmap for intel/radeon (v3)"))

- Includes some neutered patches to prepare for completing the earlier Mesa
  recovery feature uAPI. Looking to enable this in the next PR.
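
  As an illustration of both uAPI points above, here is a minimal userspace
  sketch (not taken from this series; the helper name and the lack of error
  handling are mine, and "fd" is assumed to be an open i915 render node). It
  creates a BO, reads back the page-rounded size that GEM_CREATE now reports
  again, and issues the explicit SET_DOMAIN(GTT) that a non-recycling client
  should do before touching a GTT mmap:

      #include <stddef.h>
      #include <stdint.h>
      #include <sys/ioctl.h>
      #include <sys/mman.h>
      #include <drm/i915_drm.h>

      /* hypothetical helper, illustration only */
      static void *create_and_map_gtt(int fd, uint64_t requested, uint64_t *actual)
      {
              struct drm_i915_gem_create create = { .size = requested };
              struct drm_i915_gem_mmap_gtt mmap_arg = { 0 };
              struct drm_i915_gem_set_domain sd = { 0 };
              void *ptr;

              /* create.size again reports the page-rounded object size */
              ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
              *actual = create.size;

              mmap_arg.handle = create.handle;
              ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
              ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
                         MAP_SHARED, fd, mmap_arg.offset);

              /*
               * The GTT pagefault no longer does an implicit set-domain(GTT),
               * so make the domain transition explicit before CPU access.
               */
              sd.handle = create.handle;
              sd.read_domains = I915_GEM_DOMAIN_GTT;
              sd.write_domain = I915_GEM_DOMAIN_GTT;
              ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);

              return ptr;
      }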

Driver Changes:

- Add Elkhartlake (Gen11) support code and PCI IDs
- Add missing Amberlake PCI ID 0x87CA (Ville)
- Fix to Bugzilla #109780: Pick the first mode from EDID as the fixed mode when there is no preferred mode (Ville)
- Fix GCC 4.8 build by using __is_constexpr() (Chris, Randy, Uma)
- Add "Broadcast RGB", "force_audio" and "max_bpc" properties to DP MST (Ville)
- Remove 8bpc limitation from DP MST (Ville)
- Fix changing between limited and full range RGB output in DP fastsets (Ville)
- Reject unsupported HDR formats (Maarten)
- Handle YUV subpixel support better (Maarten)

- Various plane watermark fixes and cleanup of the code (Ville)
- Icelake port sync master select fix (Manasi)
- Icelake VEBOX disable bitmask fix (Jose)
- Close a race where userspace could see incompletely initialized GEM context (Chris)
- Avoid C3 on i945gm to keep vblank interrupts steady (Ville)
- Avoid recalculating PLL HW readout each time (Lucas)
- A ton of patches to modularize uncore code (Daniel); see the sketch after this list

- Instead of storing media fuse value, immediately derive engine masks (Daniele)
- Reduce struct_mutex usage (Chris)
- Iterate over child devices to initialize ddi_port_info (Jani)
- Fixes to return correct error values when bailing out of functions (Dan)
- Use bitmap_zalloc() (Andy)
- Reorder and clarify Gen3/4 code (Ville)
- Refactor out common code in display mode handling (Ville)
- GuC code fixes (Sujaritha, Michal)
- Selftest improvements (Chris)
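
As a pointer for the uncore modularization mentioned above, a fragment-only
sketch (the wrapper function below is made up; it merely mirrors the new
helpers visible in the diffs further down) of how callers now operate on
struct intel_uncore rather than on dev_priv directly:

    #include "i915_drv.h"   /* fragment within the driver, not a full patch */

    static u32 read_reg_with_forcewake(struct drm_i915_private *dev_priv,
                                       i915_reg_t reg)
    {
            struct intel_uncore *uncore = &dev_priv->uncore;
            u32 val;

            intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
            val = intel_uncore_read_fw(uncore, reg); /* untraced, fw held */
            intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

            return val;
    }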

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190328151515.GA9606@jlahtine-desk.ger.corp.intel.com
Committed by Dave Airlie on 2019-03-29 11:01:04 +10:00, commit e0a3def2b1.
99 changed files with 3755 additions and 2248 deletions


@ -46,6 +46,7 @@ i915-y := i915_drv.o \
i915_sw_fence.o \
i915_syncmap.o \
i915_sysfs.o \
i915_user_extensions.o \
intel_csr.o \
intel_device_info.o \
intel_pm.o \


@ -1848,7 +1848,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
MMIO_D(GEN7_CXT_SIZE, D_ALL);


@ -327,6 +327,7 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_vgpu_submission *s = &vgpu->submission;
enum forcewake_domains fw;
i915_reg_t reg;
@ -351,21 +352,21 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
* otherwise device can go to RC6 state and interrupt invalidation
* process
*/
fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
fw = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ | FW_REG_WRITE);
if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(dev_priv, fw);
intel_uncore_forcewake_get(uncore, fw);
I915_WRITE_FW(reg, 0x1);
intel_uncore_write_fw(uncore, reg, 0x1);
if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50))
gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
else
vgpu_vreg_t(vgpu, reg) = 0;
intel_uncore_forcewake_put(dev_priv, fw);
intel_uncore_forcewake_put(uncore, fw);
gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}
@ -552,9 +553,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
* performace for batch mmio read/write, so we need
* handle forcewake mannually.
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
switch_mmio(pre, next, ring_id);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
/**


@ -988,7 +988,7 @@ static int workload_thread(void *priv)
workload->ring_id, workload);
if (need_force_wake)
intel_uncore_forcewake_get(gvt->dev_priv,
intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
ret = dispatch_workload(workload);
@ -1010,7 +1010,7 @@ static int workload_thread(void *priv)
complete_current_workload(gvt, ring_id);
if (need_force_wake)
intel_uncore_forcewake_put(gvt->dev_priv,
intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
intel_runtime_pm_put_unchecked(gvt->dev_priv);


@ -409,9 +409,8 @@ static void print_context_stats(struct seq_file *m,
rcu_read_lock();
task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
snprintf(name, sizeof(name), "%s/%d",
task ? task->comm : "<unknown>",
ctx->user_handle);
snprintf(name, sizeof(name), "%s",
task ? task->comm : "<unknown>");
rcu_read_unlock();
print_file_stats(m, name, stats);
@ -881,7 +880,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
for_each_engine(engine, dev_priv, id) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
engine->name, I915_READ_IMR(engine));
engine->name, ENGINE_READ(engine, RING_IMR));
}
}
@ -1094,7 +1093,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
}
/* RPSTAT1 is in the GT power well */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
if (INTEL_GEN(dev_priv) >= 9)
@ -1122,7 +1121,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
cagf = intel_gpu_freq(dev_priv,
intel_get_cagf(dev_priv, rpstat));
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
if (INTEL_GEN(dev_priv) >= 11) {
pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
@ -1414,13 +1413,14 @@ static int ironlake_drpc_info(struct seq_file *m)
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
struct intel_uncore *uncore = &i915->uncore;
struct intel_uncore_forcewake_domain *fw_domain;
unsigned int tmp;
seq_printf(m, "user.bypass_count = %u\n",
i915->uncore.user_forcewake.count);
uncore->user_forcewake.count);
for_each_fw_domain(fw_domain, i915, tmp)
for_each_fw_domain(fw_domain, uncore, tmp)
seq_printf(m, "%s.wake_count = %u\n",
intel_uncore_forcewake_domain_to_str(fw_domain->id),
READ_ONCE(fw_domain->wake_count));
@ -2059,12 +2059,12 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
u32 rpup, rpupei;
u32 rpdown, rpdownei;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
rps_power_to_str(rps->power.mode));
@ -4250,7 +4250,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
return 0;
file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
intel_uncore_forcewake_user_get(i915);
intel_uncore_forcewake_user_get(&i915->uncore);
return 0;
}
@ -4262,7 +4262,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
if (INTEL_GEN(i915) < 6)
return 0;
intel_uncore_forcewake_user_put(i915);
intel_uncore_forcewake_user_put(&i915->uncore);
intel_runtime_pm_put(i915,
(intel_wakeref_t)(uintptr_t)file->private_data);


@ -935,46 +935,6 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
i915_engines_cleanup(dev_priv);
}
static int i915_mmio_setup(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
int mmio_bar;
int mmio_size;
mmio_bar = IS_GEN(dev_priv, 2) ? 1 : 0;
/*
* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
* in the same BAR, so we want to restrict this ioremap from
* clobbering the GTT which we want ioremap_wc instead. Fortunately,
* the register BAR remains the same size for all the earlier
* generations up to Ironlake.
*/
if (INTEL_GEN(dev_priv) < 5)
mmio_size = 512 * 1024;
else
mmio_size = 2 * 1024 * 1024;
dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
if (dev_priv->regs == NULL) {
DRM_ERROR("failed to map registers\n");
return -EIO;
}
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev_priv);
return 0;
}
static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
intel_teardown_mchbar(dev_priv);
pci_iounmap(pdev, dev_priv->regs);
}
/**
* i915_driver_init_mmio - setup device MMIO
* @dev_priv: device private
@ -994,15 +954,16 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
if (i915_get_bridge_dev(dev_priv))
return -EIO;
ret = i915_mmio_setup(dev_priv);
ret = intel_uncore_init(&dev_priv->uncore);
if (ret < 0)
goto err_bridge;
intel_uncore_init(dev_priv);
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev_priv);
intel_device_info_init_mmio(dev_priv);
intel_uncore_prune(dev_priv);
intel_uncore_prune(&dev_priv->uncore);
intel_uc_init_mmio(dev_priv);
@ -1015,8 +976,8 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
return 0;
err_uncore:
intel_uncore_fini(dev_priv);
i915_mmio_cleanup(dev_priv);
intel_teardown_mchbar(dev_priv);
intel_uncore_fini(&dev_priv->uncore);
err_bridge:
pci_dev_put(dev_priv->bridge_dev);
@ -1029,8 +990,8 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
*/
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
intel_uncore_fini(dev_priv);
i915_mmio_cleanup(dev_priv);
intel_teardown_mchbar(dev_priv);
intel_uncore_fini(&dev_priv->uncore);
pci_dev_put(dev_priv->bridge_dev);
}
@ -2091,7 +2052,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
i915_gem_suspend_late(dev_priv);
intel_uncore_suspend(dev_priv);
intel_uncore_suspend(&dev_priv->uncore);
intel_power_domains_suspend(dev_priv,
get_suspend_mode(dev_priv, hibernation));
@ -2287,7 +2248,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
ret);
intel_uncore_resume_early(dev_priv);
intel_uncore_resume_early(&dev_priv->uncore);
i915_check_and_clear_faults(dev_priv);
if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
gen9_sanitize_dc_state(dev_priv);
@ -2691,7 +2654,7 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
if (!force_on)
return 0;
err = intel_wait_for_register(dev_priv,
err = intel_wait_for_register(&dev_priv->uncore,
VLV_GTLC_SURVIVABILITY_REG,
VLV_GFX_CLK_STATUS_BIT,
VLV_GFX_CLK_STATUS_BIT,
@ -2857,7 +2820,7 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_disable_interrupts(dev_priv);
intel_uncore_suspend(dev_priv);
intel_uncore_suspend(&dev_priv->uncore);
ret = 0;
if (INTEL_GEN(dev_priv) >= 11) {
@ -2874,7 +2837,7 @@ static int intel_runtime_suspend(struct device *kdev)
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
intel_uncore_runtime_resume(dev_priv);
intel_uncore_runtime_resume(&dev_priv->uncore);
intel_runtime_pm_enable_interrupts(dev_priv);
@ -2891,7 +2854,7 @@ static int intel_runtime_suspend(struct device *kdev)
enable_rpm_wakeref_asserts(dev_priv);
intel_runtime_pm_cleanup(dev_priv);
if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
DRM_ERROR("Unclaimed access detected prior to suspending\n");
dev_priv->runtime_pm.suspended = true;
@ -2919,7 +2882,7 @@ static int intel_runtime_suspend(struct device *kdev)
intel_opregion_notify_adapter(dev_priv, PCI_D1);
}
assert_forcewakes_inactive(dev_priv);
assert_forcewakes_inactive(&dev_priv->uncore);
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_poll_init(dev_priv);
@ -2945,7 +2908,7 @@ static int intel_runtime_resume(struct device *kdev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
dev_priv->runtime_pm.suspended = false;
if (intel_uncore_unclaimed_mmio(dev_priv))
if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
if (INTEL_GEN(dev_priv) >= 11) {
@ -2971,7 +2934,7 @@ static int intel_runtime_resume(struct device *kdev)
ret = vlv_resume_prepare(dev_priv, true);
}
intel_uncore_runtime_resume(dev_priv);
intel_uncore_runtime_resume(&dev_priv->uncore);
intel_runtime_pm_enable_interrupts(dev_priv);
@ -3115,7 +3078,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),


@ -92,8 +92,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20190320"
#define DRIVER_TIMESTAMP 1553069028
#define DRIVER_DATE "20190328"
#define DRIVER_TIMESTAMP 1553776914
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@ -216,7 +216,12 @@ struct drm_i915_file_private {
*/
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
} mm;
struct idr context_idr;
struct mutex context_idr_lock; /* guards context_idr */
struct idr vm_idr;
struct mutex vm_idr_lock; /* guards vm_idr */
unsigned int bsd_engine;
@ -952,6 +957,7 @@ struct ddi_vbt_port_info {
#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
u8 hdmi_level_shift;
u8 present:1;
u8 supports_dvi:1;
u8 supports_hdmi:1;
u8 supports_dp:1;
@ -1505,8 +1511,6 @@ struct drm_i915_private {
*/
resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
void __iomem *regs;
struct intel_uncore uncore;
struct i915_virtual_gpu vgpu;
@ -2039,6 +2043,14 @@ struct drm_i915_private {
struct i915_vma *scratch;
} gt;
/* For i945gm vblank irq vs. C3 workaround */
struct {
struct work_struct work;
struct pm_qos_request pm_qos;
u8 c3_disable_latency;
u8 enabled;
} i945gm_vblank;
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
@ -2102,6 +2114,11 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
return container_of(huc, struct drm_i915_private, huc);
}
static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore)
{
return container_of(uncore, struct drm_i915_private, uncore);
}
/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
for ((id__) = 0; \
@ -2315,6 +2332,7 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
@ -2353,7 +2371,8 @@ static inline unsigned int i915_sg_segment_size(void)
INTEL_DEVID(dev_priv) == 0x5915 || \
INTEL_DEVID(dev_priv) == 0x591E)
#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
INTEL_DEVID(dev_priv) == 0x87C0)
INTEL_DEVID(dev_priv) == 0x87C0 || \
INTEL_DEVID(dev_priv) == 0x87CA)
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
@ -2435,6 +2454,17 @@ static inline unsigned int i915_sg_segment_size(void)
#define ALL_ENGINES (~0u)
#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
#define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({ \
unsigned int first__ = (first); \
unsigned int count__ = (count); \
(INTEL_INFO(dev_priv)->engine_mask & \
GENMASK(first__ + count__ - 1, first__)) >> first__; \
})
#define VDBOX_MASK(dev_priv) \
ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS)
#define VEBOX_MASK(dev_priv) \
ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
@ -2960,6 +2990,14 @@ i915_coherent_map_type(struct drm_i915_private *i915)
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type);
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
unsigned long offset,
unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
__i915_gem_object_flush_map(obj, 0, obj->base.size);
}
/**
* i915_gem_object_unpin_map - releases an earlier mapping
* @obj: the object to unmap
@ -3438,18 +3476,21 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
}
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
#define __I915_REG_OP(op__, dev_priv__, ...) \
intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
#define I915_READ8(reg__) __I915_REG_OP(read8, dev_priv, (reg__))
#define I915_WRITE8(reg__, val__) __I915_REG_OP(write8, dev_priv, (reg__), (val__))
#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
#define I915_READ16(reg__) __I915_REG_OP(read16, dev_priv, (reg__))
#define I915_WRITE16(reg__, val__) __I915_REG_OP(write16, dev_priv, (reg__), (val__))
#define I915_READ16_NOTRACE(reg__) __I915_REG_OP(read16_notrace, dev_priv, (reg__))
#define I915_WRITE16_NOTRACE(reg__, val__) __I915_REG_OP(write16_notrace, dev_priv, (reg__), (val__))
#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__))
#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
#define I915_READ_NOTRACE(reg__) __I915_REG_OP(read_notrace, dev_priv, (reg__))
#define I915_WRITE_NOTRACE(reg__, val__) __I915_REG_OP(write_notrace, dev_priv, (reg__), (val__))
/* Be very careful with read/write 64-bit values. On 32-bit machines, they
* will be implemented using 2 32-bit writes in an arbitrary order with
@ -3465,46 +3506,12 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
*
* You have been warned.
*/
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
#define I915_READ64(reg__) __I915_REG_OP(read64, dev_priv, (reg__))
#define I915_READ64_2x32(lower_reg__, upper_reg__) \
__I915_REG_OP(read64_2x32, dev_priv, (lower_reg__), (upper_reg__))
#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
u32 upper, lower, old_upper, loop = 0; \
upper = I915_READ(upper_reg); \
do { \
old_upper = upper; \
lower = I915_READ(lower_reg); \
upper = I915_READ(upper_reg); \
} while (upper != old_upper && loop++ < 2); \
(u64)upper << 32 | lower; })
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
#define __raw_read(x, s) \
static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
i915_reg_t reg) \
{ \
return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
#define __raw_write(x, s) \
static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
i915_reg_t reg, uint##x##_t val) \
{ \
write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)
__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)
#undef __raw_read
#undef __raw_write
#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__))
#define POSTING_READ16(reg__) __I915_REG_OP(posting_read16, dev_priv, (reg__))
/* These are untraced mmio-accessors that are only valid to be used inside
* critical sections, such as inside IRQ handlers, where forcewake is explicitly
@ -3532,10 +3539,10 @@ __raw_write(64, q)
* therefore generally be serialised, by either the dev_priv->uncore.lock or
* a more localised lock guarding all access to that bank of registers.
*/
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
#define I915_WRITE64_FW(reg__, val__) __I915_REG_OP(write64_fw, dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) __I915_REG_OP(posting_read_fw, dev_priv, (reg__))
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0


@ -622,14 +622,15 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
static int
i915_gem_create(struct drm_file *file,
struct drm_i915_private *dev_priv,
u64 size,
u64 *size_p,
u32 *handle_p)
{
struct drm_i915_gem_object *obj;
int ret;
u32 handle;
u64 size;
int ret;
size = roundup(size, PAGE_SIZE);
size = round_up(*size_p, PAGE_SIZE);
if (size == 0)
return -EINVAL;
@ -645,6 +646,7 @@ i915_gem_create(struct drm_file *file,
return ret;
*handle_p = handle;
*size_p = obj->base.size;
return 0;
}
@ -657,7 +659,7 @@ i915_gem_dumb_create(struct drm_file *file,
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, to_i915(dev),
args->size, &args->handle);
&args->size, &args->handle);
}
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
@ -682,7 +684,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
i915_gem_flush_free_objects(dev_priv);
return i915_gem_create(file, dev_priv,
args->size, &args->handle);
&args->size, &args->handle);
}
static inline enum fb_op_origin
@ -1484,17 +1486,37 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
return -EINVAL;
/* Having something in the write domain implies it's in the read
/*
* Having something in the write domain implies it's in the read
* domain, and only that read domain. Enforce that in the request.
*/
if (write_domain != 0 && read_domains != write_domain)
if (write_domain && read_domains != write_domain)
return -EINVAL;
if (!read_domains)
return 0;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
/* Try to flush the object off the GPU without holding the lock.
/*
* Already in the desired write domain? Nothing for us to do!
*
* We apply a little bit of cunning here to catch a broader set of
* no-ops. If obj->write_domain is set, we must be in the same
* obj->read_domains, and only that domain. Therefore, if that
* obj->write_domain matches the request read_domains, we are
* already in the same read/write domain and can skip the operation,
* without having to further check the requested write_domain.
*/
if (READ_ONCE(obj->write_domain) == read_domains) {
err = 0;
goto out;
}
/*
* Try to flush the object off the GPU without holding the lock.
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
*/
@ -1713,6 +1735,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
* 2 - Recognise WC as a separate cache domain so that we can flush the
* delayed writes via GTT before performing direct access via WC.
*
* 3 - Remove implicit set-domain(GTT) and synchronisation on initial
* pagefault; swapin remains transparent.
*
* Restrictions:
*
* * snoopable objects cannot be accessed via the GTT. It can cause machine
@ -1740,7 +1765,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
*/
int i915_gem_mmap_gtt_version(void)
{
return 2;
return 3;
}
static inline struct i915_ggtt_view
@ -1808,17 +1833,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
trace_i915_gem_object_fault(obj, page_offset, true, write);
/* Try to flush the object off the GPU first without holding the lock.
* Upon acquiring the lock, we will perform our sanity checks and then
* repeat the flush holding the lock in the normal manner to catch cases
* where we are gazumped.
*/
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
if (ret)
goto err;
ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
@ -1874,10 +1888,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
goto err_unlock;
}
ret = i915_gem_object_set_to_gtt_domain(obj, write);
if (ret)
goto err_unpin;
ret = i915_vma_pin_fence(vma);
if (ret)
goto err_unpin;
@ -2534,6 +2544,14 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->mm.lock);
/* Make the pages coherent with the GPU (flushing any swapin). */
if (obj->cache_dirty) {
obj->write_domain = 0;
if (i915_gem_object_has_struct_page(obj))
drm_clflush_sg(pages);
obj->cache_dirty = false;
}
obj->mm.get_page.sg_pos = pages->sgl;
obj->mm.get_page.sg_idx = 0;
@ -2735,6 +2753,33 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
goto out_unlock;
}
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
unsigned long offset,
unsigned long size)
{
enum i915_map_type has_type;
void *ptr;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
offset, size, obj->base.size));
obj->mm.dirty = true;
if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
return;
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
if (has_type == I915_MAP_WC)
return;
drm_clflush_virt_range(ptr + offset, size);
if (size == obj->base.size) {
obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
obj->cache_dirty = false;
}
}
static int
i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *arg)
@ -4195,7 +4240,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(i915, obj->base.size);
kfree(obj->bit_17);
bitmap_free(obj->bit_17);
i915_gem_object_free(obj);
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
@ -4318,7 +4363,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
GEM_TRACE("\n");
wakeref = intel_runtime_pm_get(i915);
intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/*
* As we have just resumed the machine and woken the device up from
@ -4339,7 +4384,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
*/
intel_engines_sanitize(i915, false);
intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(i915, wakeref);
mutex_lock(&i915->drm.struct_mutex);
@ -4354,7 +4399,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
GEM_TRACE("\n");
wakeref = intel_runtime_pm_get(i915);
intel_suspend_gt_powersave(i915);
flush_workqueue(i915->wq);
@ -4388,6 +4432,8 @@ void i915_gem_suspend(struct drm_i915_private *i915)
*/
GEM_BUG_ON(i915->gt.awake);
intel_uc_suspend(i915);
intel_runtime_pm_put(i915, wakeref);
}
@ -4438,7 +4484,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
WARN_ON(i915->gt.awake);
mutex_lock(&i915->drm.struct_mutex);
intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
i915_gem_restore_gtt_mappings(i915);
i915_gem_restore_fences(i915);
@ -4460,7 +4506,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
goto err_wedged;
out_unlock:
intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
mutex_unlock(&i915->drm.struct_mutex);
return;
@ -4549,7 +4595,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
dev_priv->gt.last_init_time = ktime_get();
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
@ -4604,14 +4650,14 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
if (ret)
goto cleanup_uc;
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
return 0;
cleanup_uc:
intel_uc_fini_hw(dev_priv);
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
return ret;
}
@ -4692,6 +4738,8 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
goto err_active;
engine->default_state = i915_gem_object_get(state->obj);
i915_gem_object_set_cache_coherency(engine->default_state,
I915_CACHE_LLC);
/* Check we can acquire the image of the context state */
vaddr = i915_gem_object_pin_map(engine->default_state,
@ -4815,7 +4863,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* just magically go away.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
ret = i915_gem_init_ggtt(dev_priv);
if (ret) {
@ -4877,7 +4925,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_init_hw;
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
@ -4912,7 +4960,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
err_uc_misc:

File diff suppressed because it is too large


@ -126,11 +126,6 @@ static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
atomic_dec(&ctx->hw_id_pin_count);
}
static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}
static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
{
return !ctx->file_priv;
@ -153,6 +148,11 @@ void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,


@ -41,6 +41,8 @@ struct i915_gem_context {
/** file_priv: owning file descriptor */
struct drm_i915_file_private *file_priv;
struct i915_timeline *timeline;
/**
* @ppgtt: unique address space (GTT)
*
@ -129,15 +131,6 @@ struct i915_gem_context {
struct list_head active_engines;
struct mutex mutex;
/**
* @user_handle: userspace identifier
*
* A unique per-file identifier is generated from
* &drm_i915_file_private.contexts.
*/
u32 user_handle;
#define DEFAULT_CONTEXT_HANDLE 0
struct i915_sched_attr sched;
/** hw_contexts: per-engine logical HW state */


@ -107,6 +107,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
}


@ -1001,7 +1001,10 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
{
GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
__i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
i915_gem_object_unpin_map(cache->rq->batch->obj);
i915_gem_chipset_flush(cache->rq->i915);
i915_request_add(cache->rq);
@ -1214,10 +1217,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (IS_ERR(cmd))
return PTR_ERR(cmd);
err = i915_gem_object_set_to_wc_domain(obj, false);
if (err)
goto err_unmap;
batch = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);


@ -585,8 +585,38 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (IS_MOBILE(dev_priv) ||
IS_I915G(dev_priv) || IS_I945G(dev_priv)) {
} else if (IS_G45(dev_priv) || IS_I965G(dev_priv) || IS_G33(dev_priv)) {
/* The 965, G33, and newer, have a very flexible memory
* configuration. It will enable dual-channel mode
* (interleaving) on as much memory as it can, and the GPU
* will additionally sometimes enable different bit 6
* swizzling for tiled objects from the CPU.
*
* Here's what I found on the G965:
* slot fill memory size swizzling
* 0A 0B 1A 1B 1-ch 2-ch
* 512 0 0 0 512 0 O
* 512 0 512 0 16 1008 X
* 512 0 0 512 16 1008 X
* 0 512 0 512 16 1008 X
* 1024 1024 1024 0 2048 1024 O
*
* We could probably detect this based on either the DRB
* matching, which was the case for the swizzling required in
* the table above, or from the 1-ch value being less than
* the minimum size of a rank.
*
* Reports indicate that the swizzling actually
* varies depending upon page placement inside the
* channels, i.e. we see swizzled pages where the
* banks of memory are paired and unswizzled on the
* uneven portion, so leave that as unknown.
*/
if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
} else {
u32 dcc;
/* On 9xx chipsets, channel interleave by the CPU is
@ -636,37 +666,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
} else {
/* The 965, G33, and newer, have a very flexible memory
* configuration. It will enable dual-channel mode
* (interleaving) on as much memory as it can, and the GPU
* will additionally sometimes enable different bit 6
* swizzling for tiled objects from the CPU.
*
* Here's what I found on the G965:
* slot fill memory size swizzling
* 0A 0B 1A 1B 1-ch 2-ch
* 512 0 0 0 512 0 O
* 512 0 512 0 16 1008 X
* 512 0 0 512 16 1008 X
* 0 512 0 512 16 1008 X
* 1024 1024 1024 0 2048 1024 O
*
* We could probably detect this based on either the DRB
* matching, which was the case for the swizzling required in
* the table above, or from the 1-ch value being less than
* the minimum size of a rank.
*
* Reports indicate that the swizzling actually
* varies depending upon page placement inside the
* channels, i.e. we see swizzled pages where the
* banks of memory are paired and unswizzled on the
* uneven portion, so leave that as unknown.
*/
if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
}
if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
@ -766,8 +765,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
int i;
if (obj->bit_17 == NULL) {
obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
sizeof(long), GFP_KERNEL);
obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");


@ -1937,6 +1937,8 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
int err;
GEM_BUG_ON(ppgtt->base.vm.closed);
/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
* which will be pinned into every active context.
@ -1975,6 +1977,17 @@ void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
i915_vma_unpin(ppgtt->vma);
}
void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
if (!ppgtt->pin_count)
return;
ppgtt->pin_count = 0;
i915_vma_unpin(ppgtt->vma);
}
static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
{
struct i915_ggtt * const ggtt = &i915->ggtt;
@ -2069,8 +2082,7 @@ __hw_ppgtt_create(struct drm_i915_private *i915)
}
struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_i915_private *i915,
struct drm_i915_file_private *fpriv)
i915_ppgtt_create(struct drm_i915_private *i915)
{
struct i915_hw_ppgtt *ppgtt;
@ -2078,19 +2090,11 @@ i915_ppgtt_create(struct drm_i915_private *i915,
if (IS_ERR(ppgtt))
return ppgtt;
ppgtt->vm.file = fpriv;
trace_i915_ppgtt_create(&ppgtt->vm);
return ppgtt;
}
void i915_ppgtt_close(struct i915_address_space *vm)
{
GEM_BUG_ON(vm->closed);
vm->closed = true;
}
static void ppgtt_destroy_vma(struct i915_address_space *vm)
{
struct list_head *phases[] = {
@ -2657,7 +2661,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
struct i915_hw_ppgtt *ppgtt;
int err;
ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM));
ppgtt = i915_ppgtt_create(i915);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);


@ -396,6 +396,8 @@ struct i915_hw_ppgtt {
struct i915_page_directory_pointer pdp; /* GEN8+ */
struct i915_page_directory pd; /* GEN6-7 */
};
u32 user_handle;
};
struct gen6_hw_ppgtt {
@ -603,15 +605,16 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *fpriv);
void i915_ppgtt_close(struct i915_address_space *vm);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
static inline struct i915_hw_ppgtt *i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
kref_get(&ppgtt->ref);
kref_get(&ppgtt->ref);
return ppgtt;
}
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
@ -620,6 +623,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base);
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);


@ -164,7 +164,7 @@ static int render_state_setup(struct intel_render_state *so,
drm_clflush_virt_range(d, i * sizeof(u32));
kunmap_atomic(d);
ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
ret = 0;
out:
i915_gem_obj_finish_shmem_access(so->obj);
return ret;


@ -301,11 +301,11 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
if (!obj->bit_17) {
obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
sizeof(long), GFP_KERNEL);
obj->bit_17 = bitmap_zalloc(obj->base.size >> PAGE_SHIFT,
GFP_KERNEL);
}
} else {
kfree(obj->bit_17);
bitmap_free(obj->bit_17);
obj->bit_17 = NULL;
}


@ -454,8 +454,8 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct drm_i915_error_context *ctx)
{
err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, guilty %d active %d\n",
header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
err_printf(m, "%s%s[%d] hw_id %d, prio %d, guilty %d active %d\n",
header, ctx->comm, ctx->pid, ctx->hw_id,
ctx->sched_attr.priority, ctx->guilty, ctx->active);
}
@ -758,11 +758,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (obj) {
err_puts(m, m->i915->engine[i]->name);
if (ee->context.pid)
err_printf(m, " (submitted by %s [%d], ctx %d [%d])",
err_printf(m, " (submitted by %s [%d])",
ee->context.comm,
ee->context.pid,
ee->context.handle,
ee->context.hw_id);
ee->context.pid);
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
@ -1138,7 +1136,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
struct drm_i915_private *dev_priv = engine->i915;
if (INTEL_GEN(dev_priv) >= 6) {
ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
if (INTEL_GEN(dev_priv) >= 8)
ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
else
@ -1146,32 +1144,32 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
}
if (INTEL_GEN(dev_priv) >= 4) {
ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
ee->instps = ENGINE_READ(engine, RING_INSTPS);
ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
if (INTEL_GEN(dev_priv) >= 8) {
ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
}
ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
} else {
ee->faddr = I915_READ(DMA_FADD_I8XX);
ee->ipeir = I915_READ(IPEIR);
ee->ipehr = I915_READ(IPEHR);
ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
ee->ipeir = ENGINE_READ(engine, IPEIR);
ee->ipehr = ENGINE_READ(engine, IPEHR);
}
intel_engine_get_instdone(engine, &ee->instdone);
ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
ee->instpm = ENGINE_READ(engine, RING_INSTPM);
ee->acthd = intel_engine_get_active_head(engine);
ee->start = I915_READ_START(engine);
ee->head = I915_READ_HEAD(engine);
ee->tail = I915_READ_TAIL(engine);
ee->ctl = I915_READ_CTL(engine);
ee->start = ENGINE_READ(engine, RING_START);
ee->head = ENGINE_READ(engine, RING_HEAD);
ee->tail = ENGINE_READ(engine, RING_TAIL);
ee->ctl = ENGINE_READ(engine, RING_CTL);
if (INTEL_GEN(dev_priv) > 2)
ee->mode = I915_READ_MODE(engine);
ee->mode = ENGINE_READ(engine, RING_MI_MODE);
if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
i915_reg_t mmio;
@ -1216,10 +1214,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (IS_GEN(dev_priv, 6))
ee->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE_READ(engine));
ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
else if (IS_GEN(dev_priv, 7))
ee->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE(engine));
ENGINE_READ(engine, RING_PP_DIR_BASE);
else if (INTEL_GEN(dev_priv) >= 8)
for (i = 0; i < 4; i++) {
ee->vm_info.pdp[i] =
@ -1330,7 +1328,6 @@ static void record_context(struct drm_i915_error_context *e,
rcu_read_unlock();
}
e->handle = ctx->user_handle;
e->hw_id = ctx->hw_id;
e->sched_attr = ctx->sched;
e->guilty = atomic_read(&ctx->guilty_count);
@ -1604,7 +1601,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
}
if (INTEL_GEN(dev_priv) >= 5)
error->ccid = I915_READ(CCID);
error->ccid = I915_READ(CCID(RENDER_RING_BASE));
/* 3: Feature specific registers */
if (IS_GEN_RANGE(dev_priv, 6, 7)) {


@ -116,7 +116,6 @@ struct i915_gpu_state {
struct drm_i915_error_context {
char comm[TASK_COMM_LEN];
pid_t pid;
u32 handle;
u32 hw_id;
int active;
int guilty;


@ -30,6 +30,7 @@
#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/cpuidle.h>
#include <linux/circ_buf.h>
#include <drm/drm_irq.h>
#include <drm/drm_drv.h>
@ -268,7 +269,7 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
const unsigned int bank,
const unsigned int bit)
{
void __iomem * const regs = i915->regs;
void __iomem * const regs = i915->uncore.regs;
u32 dw;
lockdep_assert_held(&i915->irq_lock);
@ -748,13 +749,21 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
POSTING_READ(reg);
}
static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
if (!dev_priv->opregion.asle)
return false;
return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}
/**
* i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
* @dev_priv: i915 device private
*/
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
if (!i915_has_asle(dev_priv))
return;
spin_lock_irq(&dev_priv->irq_lock);
@ -1471,7 +1480,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
static void gen8_gt_irq_ack(struct drm_i915_private *i915,
u32 master_ctl, u32 gt_iir[4])
{
void __iomem * const regs = i915->regs;
void __iomem * const regs = i915->uncore.regs;
#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
GEN8_GT_BCS_IRQ | \
@ -2868,7 +2877,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = to_i915(arg);
void __iomem * const regs = dev_priv->regs;
void __iomem * const regs = dev_priv->uncore.regs;
u32 master_ctl;
u32 gt_iir[4];
@ -2902,7 +2911,7 @@ static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
const unsigned int bank, const unsigned int bit)
{
void __iomem * const regs = i915->regs;
void __iomem * const regs = i915->uncore.regs;
u32 timeout_ts;
u32 ident;
@ -2986,7 +2995,7 @@ static void
gen11_gt_bank_handler(struct drm_i915_private * const i915,
const unsigned int bank)
{
void __iomem * const regs = i915->regs;
void __iomem * const regs = i915->uncore.regs;
unsigned long intr_dw;
unsigned int bit;
@ -3029,7 +3038,7 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
static u32
gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
{
void __iomem * const regs = dev_priv->regs;
void __iomem * const regs = dev_priv->uncore.regs;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@ -3070,7 +3079,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = to_i915(arg);
void __iomem * const regs = i915->regs;
void __iomem * const regs = i915->uncore.regs;
u32 master_ctl;
u32 gu_misc_iir;
@ -3123,6 +3132,16 @@ static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
return 0;
}
static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
if (dev_priv->i945gm_vblank.enabled++ == 0)
schedule_work(&dev_priv->i945gm_vblank.work);
return i8xx_enable_vblank(dev, pipe);
}
static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@ -3187,6 +3206,16 @@ static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
i8xx_disable_vblank(dev, pipe);
if (--dev_priv->i945gm_vblank.enabled == 0)
schedule_work(&dev_priv->i945gm_vblank.work);
}
static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@ -3220,6 +3249,60 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void i945gm_vblank_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, i945gm_vblank.work);
/*
* Vblank interrupts fail to wake up the device from C3,
* hence we want to prevent C3 usage while vblank interrupts
* are enabled.
*/
pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
dev_priv->i945gm_vblank.c3_disable_latency :
PM_QOS_DEFAULT_VALUE);
}
static int cstate_disable_latency(const char *name)
{
const struct cpuidle_driver *drv;
int i;
drv = cpuidle_get_driver();
if (!drv)
return 0;
for (i = 0; i < drv->state_count; i++) {
const struct cpuidle_state *state = &drv->states[i];
if (!strcmp(state->name, name))
return state->exit_latency ?
state->exit_latency - 1 : 0;
}
return 0;
}
static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
{
INIT_WORK(&dev_priv->i945gm_vblank.work,
i945gm_vblank_work_func);
dev_priv->i945gm_vblank.c3_disable_latency =
cstate_disable_latency("C3");
pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
}
static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
{
cancel_work_sync(&dev_priv->i945gm_vblank.work);
pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
}
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_NOP(dev_priv))
@ -3351,7 +3434,7 @@ static void gen8_irq_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
gen8_master_intr_disable(dev_priv->regs);
gen8_master_intr_disable(dev_priv->uncore.regs);
gen8_gt_irq_reset(dev_priv);
@ -3393,7 +3476,7 @@ static void gen11_irq_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
gen11_master_intr_disable(dev_priv->regs);
gen11_master_intr_disable(dev_priv->uncore.regs);
gen11_gt_irq_reset(dev_priv);
@ -3998,7 +4081,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev);
gen8_master_intr_enable(dev_priv->regs);
gen8_master_intr_enable(dev_priv->uncore.regs);
return 0;
}
@ -4060,7 +4143,7 @@ static int gen11_irq_postinstall(struct drm_device *dev)
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
gen11_master_intr_enable(dev_priv->regs);
gen11_master_intr_enable(dev_priv->uncore.regs);
POSTING_READ(GEN11_GFX_MSTR_IRQ);
return 0;
@ -4517,6 +4600,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
struct intel_rps *rps = &dev_priv->gt_pm.rps;
int i;
if (IS_I945GM(dev_priv))
i945gm_vblank_work_init(dev_priv);
intel_hpd_init_work(dev_priv);
INIT_WORK(&rps->work, gen6_pm_rps_work);
@ -4556,13 +4642,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
else if (INTEL_GEN(dev_priv) >= 3)
dev->driver->get_vblank_counter = i915_get_vblank_counter;
/*
* Opt out of the vblank disable timer on everything except gen2.
* Gen2 doesn't have a hardware frame counter and so depends on
* vblank interrupts to produce sane vblank seuquence numbers.
*/
if (!IS_GEN(dev_priv, 2))
dev->vblank_disable_immediate = true;
dev->vblank_disable_immediate = true;
/* Most platforms treat the display irq block as an always-on
* power domain. vlv/chv can disable it at runtime and need
@ -4639,6 +4719,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = i8xx_irq_reset;
dev->driver->enable_vblank = i8xx_enable_vblank;
dev->driver->disable_vblank = i8xx_disable_vblank;
} else if (IS_I945GM(dev_priv)) {
dev->driver->irq_preinstall = i915_irq_reset;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_reset;
dev->driver->irq_handler = i915_irq_handler;
dev->driver->enable_vblank = i945gm_enable_vblank;
dev->driver->disable_vblank = i945gm_disable_vblank;
} else if (IS_GEN(dev_priv, 3)) {
dev->driver->irq_preinstall = i915_irq_reset;
dev->driver->irq_postinstall = i915_irq_postinstall;
@ -4669,6 +4756,9 @@ void intel_irq_fini(struct drm_i915_private *i915)
{
int i;
if (IS_I945GM(i915))
i945gm_vblank_work_fini(i915);
for (i = 0; i < MAX_L3_SLICES; ++i)
kfree(i915->l3_parity.remap_info[i]);
}


@ -730,6 +730,14 @@ static const struct intel_device_info intel_icelake_11_info = {
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
static const struct intel_device_info intel_elkhartlake_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ELKHARTLAKE),
.is_alpha_support = 1,
.engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0),
.ppgtt_size = 36,
};
#undef GEN
#undef PLATFORM
@ -799,6 +807,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_CML_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_ICL_11_IDS(&intel_icelake_11_info),
INTEL_EHL_IDS(&intel_elkhartlake_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);


@ -1364,7 +1364,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
free_oa_buffer(dev_priv);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(dev_priv, stream->wakeref);
if (stream->ctx)
@ -1509,9 +1509,7 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
goto unlock;
}
ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
if (ret)
goto err_unref;
i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
/* PreHSW required 512K alignment, HSW requires 16M */
vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
@ -1922,10 +1920,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
static void gen7_oa_disable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
struct intel_uncore *uncore = &stream->dev_priv->uncore;
I915_WRITE(GEN7_OACONTROL, 0);
if (intel_wait_for_register(dev_priv,
intel_uncore_write(uncore, GEN7_OACONTROL, 0);
if (intel_wait_for_register(uncore,
GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
50))
DRM_ERROR("wait for OA to be disabled timed out\n");
@ -1933,10 +1931,10 @@ static void gen7_oa_disable(struct i915_perf_stream *stream)
static void gen8_oa_disable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
struct intel_uncore *uncore = &stream->dev_priv->uncore;
I915_WRITE(GEN8_OACONTROL, 0);
if (intel_wait_for_register(dev_priv,
intel_uncore_write(uncore, GEN8_OACONTROL, 0);
if (intel_wait_for_register(uncore,
GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
50))
DRM_ERROR("wait for OA to be disabled timed out\n");
@ -2093,7 +2091,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* references will effectively disable RC6.
*/
stream->wakeref = intel_runtime_pm_get(dev_priv);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
ret = alloc_oa_buffer(dev_priv);
if (ret)
@ -2127,7 +2125,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
err_oa_buf_alloc:
put_oa_config(dev_priv, stream->oa_config);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(dev_priv, stream->wakeref);
err_config:

View file

@ -161,10 +161,10 @@
*/
#define REG_FIELD_PREP(__mask, __val) \
((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
BUILD_BUG_ON_ZERO(!__builtin_constant_p(__mask)) + \
BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) + \
BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
BUILD_BUG_ON_ZERO(__builtin_choose_expr(__builtin_constant_p(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
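For reference, a minimal standalone sketch of the arithmetic REG_FIELD_PREP()/REG_FIELD_GET() perform; the change above only swaps __builtin_constant_p() for __is_constexpr() so the existing compile-time checks also build with GCC 4.8, while the run-time behaviour stays as sketched here (the helper names below are illustrative, not part of the series):

#include <stdint.h>

/* Shift a value into/out of a contiguous mask. The kernel macros compute the
 * same thing but additionally BUILD_BUG when the mask is not a compile-time
 * constant contiguous bitfield, which is the part __is_constexpr() keeps
 * working on older compilers. */
static inline uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

static inline uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

/* Example: field_prep(0x70, 5) == 0x50, field_get(0x70, 0x50) == 5. */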
/**
* REG_FIELD_GET() - Extract a u32 bitfield value
@ -434,9 +434,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014)
#define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base + 0x228)
#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base + 0x518)
#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base + 0x220)
#define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228)
#define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518)
#define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220)
#define PP_DIR_DCLV_2G 0xffffffff
#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8 + 4)
@ -2568,12 +2568,12 @@ enum i915_power_well_id {
#define HWS_START_ADDRESS_SHIFT 4
#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */
#define PWRCTX_EN (1 << 0)
#define IPEIR _MMIO(0x2088)
#define IPEHR _MMIO(0x208c)
#define IPEIR(base) _MMIO((base) + 0x88)
#define IPEHR(base) _MMIO((base) + 0x8c)
#define GEN2_INSTDONE _MMIO(0x2090)
#define NOPID _MMIO(0x2094)
#define HWSTAM _MMIO(0x2098)
#define DMA_FADD_I8XX _MMIO(0x20d0)
#define DMA_FADD_I8XX(base) _MMIO((base) + 0xd0)
#define RING_BBSTATE(base) _MMIO((base) + 0x110)
#define RING_BB_PPGTT (1 << 5)
#define RING_SBBADDR(base) _MMIO((base) + 0x114) /* hsw+ */
@ -2747,7 +2747,7 @@ enum i915_power_well_id {
#define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */
#define INSTPM_TLB_INVALIDATE (1 << 9)
#define INSTPM_SYNC_FLUSH (1 << 5)
#define ACTHD _MMIO(0x20c8)
#define ACTHD(base) _MMIO((base) + 0xc8)
#define MEM_MODE _MMIO(0x20cc)
#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */
#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */
@ -2953,7 +2953,7 @@ enum i915_power_well_id {
#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140)
#define GEN11_GT_VDBOX_DISABLE_MASK 0xff
#define GEN11_GT_VEBOX_DISABLE_SHIFT 16
#define GEN11_GT_VEBOX_DISABLE_MASK (0xff << GEN11_GT_VEBOX_DISABLE_SHIFT)
#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
#define GEN11_EU_DISABLE _MMIO(0x9134)
#define GEN11_EU_DIS_MASK 0xFF
@ -3947,7 +3947,7 @@ enum i915_power_well_id {
/*
* Logical Context regs
*/
#define CCID _MMIO(0x2180)
#define CCID(base) _MMIO((base) + 0x180)
#define CCID_EN BIT(0)
#define CCID_EXTENDED_STATE_RESTORE BIT(2)
#define CCID_EXTENDED_STATE_SAVE BIT(3)
@ -9352,7 +9352,7 @@ enum skl_power_gate {
#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \
_TRANS_DDI_FUNC_CTL2_A)
#define PORT_SYNC_MODE_ENABLE (1 << 4)
#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) < 0)
#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0)
#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0)
#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0

View file

@ -66,7 +66,7 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return "signaled";
return to_request(fence)->timeline->name;
return to_request(fence)->gem_context->name ?: "[i915]";
}
static bool i915_fence_signaled(struct dma_fence *fence)
@ -167,7 +167,6 @@ static void advance_ring(struct i915_request *request)
* is just about to be. Either works, if we miss the last two
* noops - they are safe to be replayed on a reset.
*/
GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
tail = READ_ONCE(request->tail);
list_del(&ring->active_link);
} else {
@ -993,6 +992,60 @@ void i915_request_skip(struct i915_request *rq, int error)
memset(vaddr + head, 0, rq->postfix - head);
}
static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
struct i915_timeline *timeline = rq->timeline;
struct i915_request *prev;
/*
* Dependency tracking and request ordering along the timeline
* is special cased so that we can eliminate redundant ordering
* operations while building the request (we know that the timeline
* itself is ordered, and here we guarantee it).
*
* As we know we will need to emit tracking along the timeline,
* we embed the hooks into our request struct -- at the cost of
* having to have specialised no-allocation interfaces (which will
* be beneficial elsewhere).
*
* A second benefit to open-coding i915_request_await_request is
* that we can apply a slight variant of the rules specialised
* for timelines that jump between engines (such as virtual engines).
* If we consider the case of virtual engine, we must emit a dma-fence
* to prevent scheduling of the second request until the first is
* complete (to maximise our greedy late load balancing) and this
* precludes optimising to use semaphores serialisation of a single
* timeline across engines.
*/
prev = i915_active_request_raw(&timeline->last_request,
&rq->i915->drm.struct_mutex);
if (prev && !i915_request_completed(prev)) {
if (is_power_of_2(prev->engine->mask | rq->engine->mask))
i915_sw_fence_await_sw_fence(&rq->submit,
&prev->submit,
&rq->submitq);
else
__i915_sw_fence_await_dma_fence(&rq->submit,
&prev->fence,
&rq->dmaq);
if (rq->engine->schedule)
__i915_sched_node_add_dependency(&rq->sched,
&prev->sched,
&rq->dep,
0);
}
spin_lock_irq(&timeline->lock);
list_add_tail(&rq->link, &timeline->requests);
spin_unlock_irq(&timeline->lock);
GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
__i915_active_request_set(&timeline->last_request, rq);
return prev;
}
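The is_power_of_2() test above picks between the two await paths: a physical engine owns a single bit in engine->mask while a virtual engine owns several, so the OR of the two masks is a power of two only when both requests run on the same single engine and the cheaper sw-fence await suffices. A standalone sketch of that predicate, with illustrative masks:

#include <stdbool.h>
#include <stdint.h>

/* True when prev and rq can only execute on one and the same engine. */
static bool same_single_engine(uint32_t prev_mask, uint32_t rq_mask)
{
	uint32_t combined = prev_mask | rq_mask;

	return combined != 0 && (combined & (combined - 1)) == 0;
}

/*
 * same_single_engine(1u << 0, 1u << 0) -> true  (one engine, sw-fence await)
 * same_single_engine(1u << 0, 1u << 2) -> false (cross-engine, dma-fence await)
 */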
/*
* NB: This function is not allowed to fail. Doing so would mean the
* request is not being tracked for completion but the work itself is
@ -1037,37 +1090,11 @@ void i915_request_add(struct i915_request *request)
GEM_BUG_ON(IS_ERR(cs));
request->postfix = intel_ring_offset(request, cs);
/*
* Seal the request and mark it as pending execution. Note that
* we may inspect this state, without holding any locks, during
* hangcheck. Hence we apply the barrier to ensure that we do not
* see a more recent value in the hws than we are tracking.
*/
prev = i915_active_request_raw(&timeline->last_request,
&request->i915->drm.struct_mutex);
if (prev && !i915_request_completed(prev)) {
i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
&request->submitq);
if (engine->schedule)
__i915_sched_node_add_dependency(&request->sched,
&prev->sched,
&request->dep,
0);
}
spin_lock_irq(&timeline->lock);
list_add_tail(&request->link, &timeline->requests);
spin_unlock_irq(&timeline->lock);
GEM_BUG_ON(timeline->seqno != request->fence.seqno);
__i915_active_request_set(&timeline->last_request, request);
prev = __i915_request_add_to_timeline(request);
list_add_tail(&request->ring_link, &ring->request_list);
if (list_is_first(&request->ring_link, &ring->request_list)) {
GEM_TRACE("marking %s as active\n", ring->timeline->name);
if (list_is_first(&request->ring_link, &ring->request_list))
list_add(&ring->active_link, &request->i915->gt.active_rings);
}
request->i915->gt.active_engines |= request->engine->mask;
request->emitted_jiffies = jiffies;

View file

@ -128,7 +128,10 @@ struct i915_request {
* It is used by the driver to then queue the request for execution.
*/
struct i915_sw_fence submit;
wait_queue_entry_t submitq;
union {
wait_queue_entry_t submitq;
struct i915_sw_dma_fence_cb dmaq;
};
struct list_head execute_cb;
/*

View file

@ -245,10 +245,12 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
unsigned int engine_mask,
unsigned int retry)
{
struct intel_uncore *uncore = &dev_priv->uncore;
int ret;
I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR,
intel_uncore_write_fw(uncore, ILK_GDSR,
ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
ILK_GRDOM_RESET_ENABLE, 0,
5000, 0,
NULL);
@ -257,8 +259,9 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
goto out;
}
I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR,
intel_uncore_write_fw(uncore, ILK_GDSR,
ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
ILK_GRDOM_RESET_ENABLE, 0,
5000, 0,
NULL);
@ -268,8 +271,8 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
}
out:
I915_WRITE_FW(ILK_GDSR, 0);
POSTING_READ_FW(ILK_GDSR);
intel_uncore_write_fw(uncore, ILK_GDSR, 0);
intel_uncore_posting_read_fw(uncore, ILK_GDSR);
return ret;
}
@ -277,6 +280,7 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
u32 hw_domain_mask)
{
struct intel_uncore *uncore = &dev_priv->uncore;
int err;
/*
@ -284,10 +288,10 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
* for fifo space for the write or forcewake the chip for
* the read
*/
I915_WRITE_FW(GEN6_GDRST, hw_domain_mask);
intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
/* Wait for the device to ack the reset requests */
err = __intel_wait_for_register_fw(dev_priv,
err = __intel_wait_for_register_fw(uncore,
GEN6_GDRST, hw_domain_mask, 0,
500, 0,
NULL);
@ -330,6 +334,7 @@ static int gen6_reset_engines(struct drm_i915_private *i915,
static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
struct intel_uncore *uncore = &dev_priv->uncore;
u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
@ -377,10 +382,9 @@ static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
* ends up being locked to the engine we want to reset, we have to reset
* it as well (we will unlock it once the reset sequence is completed).
*/
I915_WRITE_FW(sfc_forced_lock,
I915_READ_FW(sfc_forced_lock) | sfc_forced_lock_bit);
intel_uncore_rmw_or_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
if (__intel_wait_for_register_fw(dev_priv,
if (__intel_wait_for_register_fw(uncore,
sfc_forced_lock_ack,
sfc_forced_lock_ack_bit,
sfc_forced_lock_ack_bit,
@ -389,7 +393,7 @@ static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
return 0;
}
if (I915_READ_FW(sfc_usage) & sfc_usage_bit)
if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
return sfc_reset_bit;
return 0;
@ -465,13 +469,13 @@ static int gen11_reset_engines(struct drm_i915_private *i915,
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_uncore *uncore = &engine->i915->uncore;
int ret;
I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
intel_uncore_write_fw(uncore, RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
ret = __intel_wait_for_register_fw(dev_priv,
ret = __intel_wait_for_register_fw(uncore,
RING_RESET_CTL(engine->mmio_base),
RESET_CTL_READY_TO_RESET,
RESET_CTL_READY_TO_RESET,
@ -569,7 +573,7 @@ int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
* If the power well sleeps during the reset, the reset
* request may be dropped and never completes (causing -EIO).
*/
intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
/*
* We stop engines, otherwise we might get failed reset and a
@ -593,7 +597,7 @@ int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
ret = reset(i915, engine_mask, retry);
preempt_enable();
}
intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
return ret;
}
@ -622,9 +626,9 @@ int intel_reset_guc(struct drm_i915_private *i915)
GEM_BUG_ON(!HAS_GUC(i915));
intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
ret = gen6_hw_domain_reset(i915, guc_domain);
intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
return ret;
}
@ -642,7 +646,7 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
* written to the powercontext is undefined and so we may lose
* GPU state upon resume, i.e. fail to restart after a reset.
*/
intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&engine->i915->uncore, FORCEWAKE_ALL);
engine->reset.prepare(engine);
}
@ -713,7 +717,7 @@ static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
static void reset_finish_engine(struct intel_engine_cs *engine)
{
engine->reset.finish(engine);
intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&engine->i915->uncore, FORCEWAKE_ALL);
}
struct i915_gpu_restart {
@ -1169,19 +1173,24 @@ static void i915_reset_device(struct drm_i915_private *i915,
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}
static void clear_register(struct drm_i915_private *dev_priv, i915_reg_t reg)
{
I915_WRITE(reg, I915_READ(reg));
}
void i915_clear_error_registers(struct drm_i915_private *dev_priv)
{
u32 eir;
if (!IS_GEN(dev_priv, 2))
I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
clear_register(dev_priv, PGTBL_ER);
if (INTEL_GEN(dev_priv) < 4)
I915_WRITE(IPEIR, I915_READ(IPEIR));
clear_register(dev_priv, IPEIR(RENDER_RING_BASE));
else
I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
clear_register(dev_priv, IPEIR_I965);
I915_WRITE(EIR, I915_READ(EIR));
clear_register(dev_priv, EIR);
eir = I915_READ(EIR);
if (eir) {
/*

View file

@ -359,11 +359,6 @@ int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
}
struct i915_sw_dma_fence_cb {
struct dma_fence_cb base;
struct i915_sw_fence *fence;
};
struct i915_sw_dma_fence_cb_timer {
struct i915_sw_dma_fence_cb base;
struct dma_fence *dma;
@ -480,6 +475,40 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
return ret;
}
static void __dma_i915_sw_fence_wake(struct dma_fence *dma,
struct dma_fence_cb *data)
{
struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
i915_sw_fence_complete(cb->fence);
}
int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
struct dma_fence *dma,
struct i915_sw_dma_fence_cb *cb)
{
int ret;
debug_fence_assert(fence);
if (dma_fence_is_signaled(dma))
return 0;
cb->fence = fence;
i915_sw_fence_await(fence);
ret = dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake);
if (ret == 0) {
ret = 1;
} else {
i915_sw_fence_complete(fence);
if (ret == -ENOENT) /* fence already signaled */
ret = 0;
}
return ret;
}
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
const struct dma_fence_ops *exclude,

View file

@ -9,14 +9,13 @@
#ifndef _I915_SW_FENCE_H_
#define _I915_SW_FENCE_H_
#include <linux/dma-fence.h>
#include <linux/gfp.h>
#include <linux/kref.h>
#include <linux/notifier.h> /* for NOTIFY_DONE */
#include <linux/wait.h>
struct completion;
struct dma_fence;
struct dma_fence_ops;
struct reservation_object;
struct i915_sw_fence {
@ -68,10 +67,20 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
struct i915_sw_fence *after,
gfp_t gfp);
struct i915_sw_dma_fence_cb {
struct dma_fence_cb base;
struct i915_sw_fence *fence;
};
int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
struct dma_fence *dma,
struct i915_sw_dma_fence_cb *cb);
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
struct dma_fence *dma,
unsigned long timeout,
gfp_t gfp);
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
const struct dma_fence_ops *exclude,

View file

@ -197,7 +197,6 @@ static void cacheline_free(struct i915_timeline_cacheline *cl)
int i915_timeline_init(struct drm_i915_private *i915,
struct i915_timeline *timeline,
const char *name,
struct i915_vma *hwsp)
{
void *vaddr;
@ -213,7 +212,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
timeline->i915 = i915;
timeline->name = name;
timeline->pin_count = 0;
timeline->has_initial_breadcrumb = !hwsp;
timeline->hwsp_cacheline = NULL;
@ -342,7 +340,6 @@ void i915_timeline_fini(struct i915_timeline *timeline)
struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915,
const char *name,
struct i915_vma *global_hwsp)
{
struct i915_timeline *timeline;
@ -352,7 +349,7 @@ i915_timeline_create(struct drm_i915_private *i915,
if (!timeline)
return ERR_PTR(-ENOMEM);
err = i915_timeline_init(i915, timeline, name, global_hwsp);
err = i915_timeline_init(i915, timeline, global_hwsp);
if (err) {
kfree(timeline);
return ERR_PTR(err);

View file

@ -32,7 +32,6 @@
int i915_timeline_init(struct drm_i915_private *i915,
struct i915_timeline *tl,
const char *name,
struct i915_vma *hwsp);
void i915_timeline_fini(struct i915_timeline *tl);
@ -57,7 +56,6 @@ i915_timeline_set_subclass(struct i915_timeline *timeline,
struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915,
const char *name,
struct i915_vma *global_hwsp);
static inline struct i915_timeline *

View file

@ -71,7 +71,6 @@ struct i915_timeline {
struct i915_active_request barrier;
struct list_head link;
const char *name;
struct drm_i915_private *i915;
struct kref kref;

View file

@ -0,0 +1,61 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*/
#include <linux/nospec.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <uapi/drm/i915_drm.h>
#include "i915_user_extensions.h"
#include "i915_utils.h"
int i915_user_extensions(struct i915_user_extension __user *ext,
const i915_user_extension_fn *tbl,
unsigned int count,
void *data)
{
unsigned int stackdepth = 512;
while (ext) {
int i, err;
u32 name;
u64 next;
if (!stackdepth--) /* recursion vs useful flexibility */
return -E2BIG;
err = check_user_mbz(&ext->flags);
if (err)
return err;
for (i = 0; i < ARRAY_SIZE(ext->rsvd); i++) {
err = check_user_mbz(&ext->rsvd[i]);
if (err)
return err;
}
if (get_user(name, &ext->name))
return -EFAULT;
err = -EINVAL;
if (name < count) {
name = array_index_nospec(name, count);
if (tbl[name])
err = tbl[name](ext, data);
}
if (err)
return err;
if (get_user(next, &ext->next_extension) ||
overflows_type(next, ext))
return -EFAULT;
ext = u64_to_user_ptr(next);
}
return 0;
}
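As a usage sketch of the new chain walker: userspace links extension nodes through next_extension and terminates with 0, and the kernel dispatches tbl[name] for each node while rejecting non-zero flags/rsvd and chains deeper than 512 links. The extension names and payload struct below are hypothetical; only the i915_user_extension layout is assumed from the uAPI header added by this series:

#include <string.h>
#include <stdint.h>
#include <drm/i915_drm.h>

#define HYPOTHETICAL_EXT_FOO 0	/* made-up names for this sketch */
#define HYPOTHETICAL_EXT_BAR 1

struct hypothetical_ext_bar {
	struct i915_user_extension base;
	uint32_t value;
};

static void build_chain(struct i915_user_extension *foo,
			struct hypothetical_ext_bar *bar)
{
	/* flags and rsvd[] are must-be-zero, so start from zeroed structs */
	memset(foo, 0, sizeof(*foo));
	memset(bar, 0, sizeof(*bar));

	bar->base.name = HYPOTHETICAL_EXT_BAR;	/* next_extension == 0 ends the chain */
	bar->value = 42;

	foo->name = HYPOTHETICAL_EXT_FOO;
	foo->next_extension = (uintptr_t)bar;	/* kernel visits foo, then bar */
}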

View file

@ -0,0 +1,20 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*/
#ifndef I915_USER_EXTENSIONS_H
#define I915_USER_EXTENSIONS_H
struct i915_user_extension;
typedef int (*i915_user_extension_fn)(struct i915_user_extension __user *ext,
void *data);
int i915_user_extensions(struct i915_user_extension __user *ext,
const i915_user_extension_fn *tbl,
unsigned int count,
void *data);
#endif /* I915_USER_EXTENSIONS_H */

View file

@ -105,6 +105,37 @@
__T; \
})
/*
* container_of_user: Extract the superclass from a pointer to a member.
*
* Exactly like container_of() with the exception that it plays nicely
* with sparse for __user @ptr.
*/
#define container_of_user(ptr, type, member) ({ \
void __user *__mptr = (void __user *)(ptr); \
BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
!__same_type(*(ptr), void), \
"pointer type mismatch in container_of()"); \
((type __user *)(__mptr - offsetof(type, member))); })
/*
* check_user_mbz: Check that a user value exists and is zero
*
* Frequently in our uABI we reserve space for future extensions, and
* to ensure that userspace is prepared we enforce that space must
* be zero. (Then any future extension can safely assume a default value
* of 0.)
*
* check_user_mbz() combines checking that the user pointer is accessible
* and that the contained value is zero.
*
* Returns: -EFAULT if not accessible, -EINVAL if !zero, or 0 on success.
*/
#define check_user_mbz(U) ({ \
typeof(*(U)) mbz__; \
get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \
})
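A kernel-side sketch of how the two helpers above combine in an extension handler: container_of_user() recovers the enclosing __user struct from the pointer to its embedded base member, and check_user_mbz() enforces that a reserved field is readable and zero. The struct and handler names are hypothetical:

struct hypothetical_ext {
	struct i915_user_extension base;
	__u32 rsvd;
	__u32 value;
};

static int handle_hypothetical_ext(struct i915_user_extension __user *base,
				   void *data)
{
	struct hypothetical_ext __user *ext =
		container_of_user(base, struct hypothetical_ext, base);
	u32 value;
	int err;

	err = check_user_mbz(&ext->rsvd);	/* -EFAULT, -EINVAL or 0 */
	if (err)
		return err;

	if (get_user(value, &ext->value))
		return -EFAULT;

	/* ... apply 'value' to the object referenced by 'data' ... */
	return 0;
}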
static inline u64 ptr_to_u64(const void *ptr)
{
return (uintptr_t)ptr;

View file

@ -60,22 +60,23 @@
*/
void i915_check_vgpu(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
u64 magic;
u16 version_major;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
magic = __raw_uncore_read64(uncore, vgtif_reg(magic));
if (magic != VGT_MAGIC)
return;
version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
version_major = __raw_uncore_read16(uncore, vgtif_reg(version_major));
if (version_major < VGT_VERSION_MAJOR) {
DRM_INFO("VGT interface version mismatch!\n");
return;
}
dev_priv->vgpu.caps = __raw_i915_read32(dev_priv, vgtif_reg(vgt_caps));
dev_priv->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps));
dev_priv->vgpu.active = true;
DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");

View file

@ -861,7 +861,8 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be enabled */
if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
if (intel_wait_for_register(&dev_priv->uncore,
PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE,
I965_PIPECONF_ACTIVE, 10))
DRM_ERROR("DSI transcoder not enabled\n");
@ -1039,7 +1040,8 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be disabled */
if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
if (intel_wait_for_register(&dev_priv->uncore,
PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE, 0, 50))
DRM_ERROR("DSI trancoder not disabled\n");
}
@ -1179,11 +1181,10 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 pll_id;
/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
pipe_config->port_clock =
cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
}
@ -1361,7 +1362,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
struct intel_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_display_mode *scan, *fixed_mode = NULL;
struct drm_display_mode *fixed_mode;
enum port port;
if (!intel_bios_is_dsi_present(dev_priv, &port))
@ -1411,15 +1412,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
/* attach connector to encoder */
intel_connector_attach_encoder(intel_connector, encoder);
/* fill mode info from VBT */
mutex_lock(&dev->mode_config.mutex);
intel_dsi_vbt_get_modes(intel_dsi);
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
fixed_mode = drm_mode_duplicate(dev, scan);
break;
}
}
fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
if (!fixed_mode) {
@ -1427,12 +1421,9 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
goto err;
}
connector->display_info.width_mm = fixed_mode->width_mm;
connector->display_info.height_mm = fixed_mode->height_mm;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_panel_setup_backlight(connector, INVALID_PIPE);
if (dev_priv->vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
else

View file

@ -1247,10 +1247,11 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
if (!info->alternate_ddc_pin)
return;
for_each_port_masked(p, (1 << port) - 1) {
for (p = PORT_A; p < I915_MAX_PORTS; p++) {
struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
if (info->alternate_ddc_pin != i->alternate_ddc_pin)
if (p == port || !i->present ||
info->alternate_ddc_pin != i->alternate_ddc_pin)
continue;
DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
@ -1264,8 +1265,8 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
* port. Otherwise they share the same ddc pin and
* system couldn't communicate with them separately.
*
* Due to parsing the ports in alphabetical order,
* a higher port will always clobber a lower one.
* Due to parsing the ports in child device order,
* a later device will always clobber an earlier one.
*/
i->supports_dvi = false;
i->supports_hdmi = false;
@ -1283,10 +1284,11 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
if (!info->alternate_aux_channel)
return;
for_each_port_masked(p, (1 << port) - 1) {
for (p = PORT_A; p < I915_MAX_PORTS; p++) {
struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
if (info->alternate_aux_channel != i->alternate_aux_channel)
if (p == port || !i->present ||
info->alternate_aux_channel != i->alternate_aux_channel)
continue;
DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
@ -1300,8 +1302,8 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
* port. Otherwise they share the same aux channel
* and system couldn't communicate with them separately.
*
* Due to parsing the ports in alphabetical order,
* a higher port will always clobber a lower one.
* Due to parsing the ports in child device order,
* a later device will always clobber an earlier one.
*/
i->supports_dp = false;
i->alternate_aux_channel = 0;
@ -1349,49 +1351,58 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
return 0;
}
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
u8 bdb_version)
static enum port dvo_port_to_port(u8 dvo_port)
{
struct child_device_config *it, *child = NULL;
struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
int i, j;
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
/* Each DDI port can have more than one value on the "DVO Port" field,
/*
* Each DDI port can have more than one value on the "DVO Port" field,
* so look for all the possible values for each port.
*/
int dvo_ports[][3] = {
{DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
{DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
{DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
{DVO_PORT_HDMID, DVO_PORT_DPD, -1},
{DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
{DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
static const int dvo_ports[][3] = {
[PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
[PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
[PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
[PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1},
[PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
[PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
};
enum port port;
int i;
/*
* Find the first child device to reference the port, report if more
* than one found.
*/
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
it = dev_priv->vbt.child_dev + i;
for (j = 0; j < 3; j++) {
if (dvo_ports[port][j] == -1)
for (port = PORT_A; port < ARRAY_SIZE(dvo_ports); port++) {
for (i = 0; i < ARRAY_SIZE(dvo_ports[port]); i++) {
if (dvo_ports[port][i] == -1)
break;
if (it->dvo_port == dvo_ports[port][j]) {
if (child) {
DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
port_name(port));
} else {
child = it;
}
}
if (dvo_port == dvo_ports[port][i])
return port;
}
}
if (!child)
return PORT_NONE;
}
static void parse_ddi_port(struct drm_i915_private *dev_priv,
const struct child_device_config *child,
u8 bdb_version)
{
struct ddi_vbt_port_info *info;
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
enum port port;
port = dvo_port_to_port(child->dvo_port);
if (port == PORT_NONE)
return;
info = &dev_priv->vbt.ddi_port_info[port];
if (info->present) {
DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
port_name(port));
return;
}
info->present = true;
is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
is_crt = child->device_type & DEVICE_TYPE_ANALOG_OUTPUT;
@ -1523,19 +1534,20 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
{
enum port port;
const struct child_device_config *child;
int i;
if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
return;
if (!dev_priv->vbt.child_dev_num)
return;
if (bdb_version < 155)
return;
for (port = PORT_A; port < I915_MAX_PORTS; port++)
parse_ddi_port(dev_priv, port, bdb_version);
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
child = dev_priv->vbt.child_dev + i;
parse_ddi_port(dev_priv, child, bdb_version);
}
}
static void

View file

@ -234,7 +234,8 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
else
return 0;
tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
tmp = I915_READ(IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ?
HPLLVCO_MOBILE : HPLLVCO);
vco = vco_table[tmp & 0x7];
if (vco == 0)
@ -964,7 +965,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
5))
DRM_ERROR("DPLL0 not locked\n");
@ -978,9 +979,9 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
if (intel_wait_for_register(dev_priv,
LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
1))
if (intel_wait_for_register(&dev_priv->uncore,
LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
1))
DRM_ERROR("Couldn't disable DPLL0\n");
dev_priv->cdclk.hw.vco = 0;
@ -1323,7 +1324,7 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
I915_WRITE(BXT_DE_PLL_ENABLE, 0);
/* Timeout 200us */
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
1))
DRM_ERROR("timeout waiting for DE PLL unlock\n");
@ -1344,7 +1345,7 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
BXT_DE_PLL_ENABLE,
BXT_DE_PLL_LOCK,
BXT_DE_PLL_LOCK,

View file

@ -435,7 +435,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
1000))
@ -489,7 +489,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
1000)) {
@ -542,7 +542,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
if (intel_wait_for_register(dev_priv, PORT_HOTPLUG_EN,
if (intel_wait_for_register(&dev_priv->uncore, PORT_HOTPLUG_EN,
CRT_HOTPLUG_FORCE_DETECT, 0,
1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");

View file

@ -486,7 +486,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 12) {
/* Allow to load fw via parameter using the last known size */
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
} else if (IS_ICELAKE(dev_priv)) {
} else if (IS_GEN(dev_priv, 11)) {
csr->fw_path = ICL_CSR_PATH;
csr->required_version = ICL_CSR_VERSION_REQUIRED;
csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;

View file

@ -1240,24 +1240,15 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
return (refclk * n * 100) / (p * r);
}
static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
enum intel_dpll_id pll_id)
static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state)
{
i915_reg_t cfgcr1_reg, cfgcr2_reg;
u32 cfgcr1_val, cfgcr2_val;
u32 p0, p1, p2, dco_freq;
cfgcr1_reg = DPLL_CFGCR1(pll_id);
cfgcr2_reg = DPLL_CFGCR2(pll_id);
p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
cfgcr1_val = I915_READ(cfgcr1_reg);
cfgcr2_val = I915_READ(cfgcr2_reg);
p0 = cfgcr2_val & DPLL_CFGCR2_PDIV_MASK;
p2 = cfgcr2_val & DPLL_CFGCR2_KDIV_MASK;
if (cfgcr2_val & DPLL_CFGCR2_QDIV_MODE(1))
p1 = (cfgcr2_val & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
else
p1 = 1;
@ -1292,10 +1283,11 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
break;
}
dco_freq = (cfgcr1_val & DPLL_CFGCR1_DCO_INTEGER_MASK) * 24 * 1000;
dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK)
* 24 * 1000;
dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
1000) / 0x8000;
dco_freq += (((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9)
* 24 * 1000) / 0x8000;
if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
return 0;
@ -1304,24 +1296,15 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
}
int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
enum intel_dpll_id pll_id)
struct intel_dpll_hw_state *pll_state)
{
u32 cfgcr0, cfgcr1;
u32 p0, p1, p2, dco_freq, ref_clock;
if (INTEL_GEN(dev_priv) >= 11) {
cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
} else {
cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
}
p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
if (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
p1 = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
DPLL_CFGCR1_QDIV_RATIO_SHIFT;
else
p1 = 1;
@ -1356,9 +1339,10 @@ int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock;
dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK)
* ref_clock;
dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
@ -1390,25 +1374,21 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
}
static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
enum port port)
const struct intel_dpll_hw_state *pll_state)
{
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
u32 mg_pll_div0, mg_clktop_hsclkctl;
u32 m1, m2_int, m2_frac, div1, div2, refclk;
u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
u64 tmp;
refclk = dev_priv->cdclk.hw.ref;
ref_clock = dev_priv->cdclk.hw.ref;
mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
mg_clktop_hsclkctl = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
m2_frac = (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
(pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0;
m1 = I915_READ(MG_PLL_DIV1(tc_port)) & MG_PLL_DIV1_FBPREDIV_MASK;
m2_int = mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
m2_frac = (mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
(mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0;
switch (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
switch (pll_state->mg_clktop2_hsclkctl &
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
div1 = 2;
break;
@ -1422,12 +1402,14 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
div1 = 7;
break;
default:
MISSING_CASE(mg_clktop_hsclkctl);
MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
return 0;
}
div2 = (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
div2 = (pll_state->mg_clktop2_hsclkctl &
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
/* div2 value of 0 is same as 1 means no div */
if (div2 == 0)
div2 = 1;
@ -1436,8 +1418,8 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
* Adjust the original formula to delay the division by 2^22 in order to
* minimize possible rounding errors.
*/
tmp = (u64)m1 * m2_int * refclk +
(((u64)m1 * m2_frac * refclk) >> 22);
tmp = (u64)m1 * m2_int * ref_clock +
(((u64)m1 * m2_frac * ref_clock) >> 22);
tmp = div_u64(tmp, 5 * div1 * div2);
return tmp;
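For clarity, the integer arithmetic above is the closed-form link-clock formula link_clock = M1 * (M2_int + M2_frac / 2^22) * ref_clock / (5 * div1 * div2), with the >> 22 applied only after the multiplications so the fractional feedback divider keeps its precision. A standalone restatement (field decoding from the PLL state is as in the function above):

#include <stdint.h>

static uint32_t mg_pll_link_clock(uint32_t m1, uint32_t m2_int, uint32_t m2_frac,
				  uint32_t div1, uint32_t div2, uint32_t ref_clock)
{
	uint64_t tmp = (uint64_t)m1 * m2_int * ref_clock +
		       (((uint64_t)m1 * m2_frac * ref_clock) >> 22);

	return (uint32_t)(tmp / (5 * div1 * div2));
}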
@ -1471,21 +1453,24 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
enum port port = encoder->port;
int link_clock = 0;
u32 pll_id;
int link_clock;
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
if (intel_port_is_combophy(dev_priv, port)) {
link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
} else {
enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv,
pipe_config->shared_dpll);
if (pll_id == DPLL_ID_ICL_TBTPLL)
link_clock = icl_calc_tbt_pll_link(dev_priv, port);
else
link_clock = icl_calc_mg_pll_link(dev_priv, port);
link_clock = icl_calc_mg_pll_link(dev_priv, pll_state);
}
pipe_config->port_clock = link_clock;
ddi_dotclock_get(pipe_config);
}
@ -1493,18 +1478,13 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int link_clock = 0;
u32 cfgcr0;
enum intel_dpll_id pll_id;
struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
int link_clock;
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
if (cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
} else {
link_clock = cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
link_clock = pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
switch (link_clock) {
case DPLL_CFGCR0_LINK_RATE_810:
@ -1544,22 +1524,20 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder,
}
static void skl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int link_clock = 0;
u32 dpll_ctl1;
enum intel_dpll_id pll_id;
struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
int link_clock;
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
dpll_ctl1 = I915_READ(DPLL_CTRL1);
if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(pll_id)) {
link_clock = skl_calc_wrpll_link(dev_priv, pll_id);
/*
* ctrl1 register is already shifted for each pll, just use 0 to get
* the internal shift for each field
*/
if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) {
link_clock = skl_calc_wrpll_link(pll_state);
} else {
link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(pll_id);
link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(pll_id);
link_clock = pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0);
link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(0);
switch (link_clock) {
case DPLL_CTRL1_LINK_RATE_810:
@ -1639,24 +1617,17 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
ddi_dotclock_get(pipe_config);
}
static int bxt_calc_pll_link(struct intel_crtc_state *crtc_state)
static int bxt_calc_pll_link(const struct intel_dpll_hw_state *pll_state)
{
struct intel_dpll_hw_state *state;
struct dpll clock;
/* For DDI ports we always use a shared PLL. */
if (WARN_ON(!crtc_state->shared_dpll))
return 0;
state = &crtc_state->dpll_hw_state;
clock.m1 = 2;
clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
if (state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
clock.m2 |= state->pll2 & PORT_PLL_M2_FRAC_MASK;
clock.n = (state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
clock.p1 = (state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
clock.p2 = (state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
return chv_calc_dpll_params(100000, &clock);
}
@ -1664,7 +1635,8 @@ static int bxt_calc_pll_link(struct intel_crtc_state *crtc_state)
static void bxt_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
pipe_config->port_clock = bxt_calc_pll_link(pipe_config);
pipe_config->port_clock =
bxt_calc_pll_link(&pipe_config->dpll_hw_state);
ddi_dotclock_get(pipe_config);
}
@ -3074,7 +3046,7 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
val |= DP_TP_CTL_FEC_ENABLE;
I915_WRITE(DP_TP_CTL(port), val);
if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
DP_TP_STATUS_FEC_ENABLE_LIVE,
DP_TP_STATUS_FEC_ENABLE_LIVE,
1))
@ -3552,6 +3524,8 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_ddi_set_pipe_settings(crtc_state);
intel_psr_update(intel_dp, crtc_state);
intel_edp_drrs_enable(intel_dp, crtc_state);

View file

@ -57,6 +57,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(COFFEELAKE),
PLATFORM_NAME(CANNONLAKE),
PLATFORM_NAME(ICELAKE),
PLATFORM_NAME(ELKHARTLAKE),
};
#undef PLATFORM_NAME
@ -155,9 +156,15 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
u8 eu_en;
int s;
sseu->max_slices = 1;
sseu->max_subslices = 8;
sseu->max_eus_per_subslice = 8;
if (IS_ELKHARTLAKE(dev_priv)) {
sseu->max_slices = 1;
sseu->max_subslices = 4;
sseu->max_eus_per_subslice = 8;
} else {
sseu->max_slices = 1;
sseu->max_subslices = 8;
sseu->max_eus_per_subslice = 8;
}
s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
@ -871,22 +878,23 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
unsigned int logical_vdbox = 0;
unsigned int i;
u32 media_fuse;
u16 vdbox_mask;
u16 vebox_mask;
if (INTEL_GEN(dev_priv) < 11)
return;
media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
RUNTIME_INFO(dev_priv)->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
RUNTIME_INFO(dev_priv)->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
GEN11_GT_VEBOX_DISABLE_SHIFT;
vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
GEN11_GT_VEBOX_DISABLE_SHIFT;
DRM_DEBUG_DRIVER("vdbox enable: %04x\n", RUNTIME_INFO(dev_priv)->vdbox_enable);
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(dev_priv, _VCS(i)))
continue;
if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vdbox_enable)) {
if (!(BIT(i) & vdbox_mask)) {
info->engine_mask &= ~BIT(_VCS(i));
DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
continue;
@ -899,15 +907,20 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
if (logical_vdbox++ % 2 == 0)
RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
}
DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
vdbox_mask, VDBOX_MASK(dev_priv));
GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
DRM_DEBUG_DRIVER("vebox enable: %04x\n", RUNTIME_INFO(dev_priv)->vebox_enable);
for (i = 0; i < I915_MAX_VECS; i++) {
if (!HAS_ENGINE(dev_priv, _VECS(i)))
continue;
if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vebox_enable)) {
if (!(BIT(i) & vebox_mask)) {
info->engine_mask &= ~BIT(_VECS(i));
DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
}
}
DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
vebox_mask, VEBOX_MASK(dev_priv));
GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
}
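A small sketch of the fuse decode above, tying it to the corrected GEN11_GT_VEBOX_DISABLE_MASK earlier in this series: the register holds the *disabled* engines, so the value is inverted and split into the vdbox and (4-bit) vebox fields before being checked against the platform's engine_mask:

#include <stdint.h>

static void decode_media_fuse(uint32_t fuse_disable,
			      uint16_t *vdbox_mask, uint16_t *vebox_mask)
{
	uint32_t media_fuse = ~fuse_disable;		/* register stores disabled engines */

	*vdbox_mask = media_fuse & 0xff;		/* GEN11_GT_VDBOX_DISABLE_MASK */
	*vebox_mask = (media_fuse >> 16) & 0x0f;	/* GEN11_GT_VEBOX_DISABLE_* */
}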

View file

@ -73,6 +73,7 @@ enum intel_platform {
INTEL_CANNONLAKE,
/* gen11 */
INTEL_ICELAKE,
INTEL_ELKHARTLAKE,
INTEL_MAX_PLATFORMS
};
@ -208,10 +209,6 @@ struct intel_runtime_info {
u32 cs_timestamp_frequency_khz;
/* Enabled (not fused off) media engine bitmasks. */
u8 vdbox_enable;
u8 vebox_enable;
/* Media engine access to SFC per instance */
u8 vdbox_sfc_access;
};

View file

@ -1040,7 +1040,7 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
reg, I965_PIPECONF_ACTIVE, 0,
100))
WARN(1, "pipe_off wait timed out\n");
@ -1346,7 +1346,7 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
DPLL(pipe),
DPLL_LOCK_VLV,
DPLL_LOCK_VLV,
@ -1399,7 +1399,7 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
/* Check PLL is locked */
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
1))
DRM_ERROR("PLL %d failed to lock\n", pipe);
@ -1442,6 +1442,14 @@ static void chv_enable_pll(struct intel_crtc *crtc,
}
}
static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
if (IS_I830(dev_priv))
return false;
return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}
static void i9xx_enable_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state)
{
@ -1453,7 +1461,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
assert_pipe_disabled(dev_priv, crtc->pipe);
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
if (i9xx_has_pps(dev_priv))
assert_panel_unlocked(dev_priv, crtc->pipe);
/*
@ -1572,7 +1580,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
BUG();
}
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
dpll_reg, port_mask, expected_mask,
1000))
WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
@ -1633,7 +1641,7 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
}
I915_WRITE(reg, val | TRANS_ENABLE);
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
100))
DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
@ -1663,7 +1671,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val |= TRANS_PROGRESSIVE;
I915_WRITE(LPT_TRANSCONF, val);
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
LPT_TRANSCONF,
TRANS_STATE_ENABLE,
TRANS_STATE_ENABLE,
@ -1689,7 +1697,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_ENABLE;
I915_WRITE(reg, val);
/* wait for PCH transcoder off, transcoder state */
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
reg, TRANS_STATE_ENABLE, 0,
50))
DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
@ -1711,7 +1719,7 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
val &= ~TRANS_ENABLE;
I915_WRITE(LPT_TRANSCONF, val);
/* wait for PCH transcoder off, transcoder state */
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
50))
DRM_ERROR("Failed to disable PCH transcoder\n");
@ -5291,7 +5299,7 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
* and don't wait for vblanks until the end of crtc_enable, then
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read. */
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
IPS_CTL, IPS_ENABLE, IPS_ENABLE,
50))
DRM_ERROR("Timed out waiting for IPS enable\n");
@ -5316,7 +5324,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
* 42ms timeout value leads to occasional timeouts so use 100ms
* instead.
*/
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
IPS_CTL, IPS_ENABLE, 0,
100))
DRM_ERROR("Timed out waiting for IPS disable\n");
@ -7962,14 +7970,22 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
return 0;
}
static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
{
if (IS_I830(dev_priv))
return false;
return INTEL_GEN(dev_priv) >= 4 ||
IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
if (INTEL_GEN(dev_priv) <= 3 &&
(IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
if (!i9xx_has_pfit(dev_priv))
return;
tmp = I915_READ(PFIT_CONTROL);
@ -9465,7 +9481,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
I915_WRITE(LCPLL_CTL, val);
POSTING_READ(LCPLL_CTL);
if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
if (intel_wait_for_register(&dev_priv->uncore,
LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
DRM_ERROR("LCPLL still locked\n");
val = hsw_read_dcomp(dev_priv);
@ -9503,7 +9520,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine. To prevent PC8 state, just enable force_wake.
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@ -9520,7 +9537,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val &= ~LCPLL_PLL_DISABLE;
I915_WRITE(LCPLL_CTL, val);
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
5))
DRM_ERROR("LCPLL not locked yet\n");
@ -9535,7 +9552,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
DRM_ERROR("Switching back to LCPLL failed\n");
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_update_cdclk(dev_priv);
intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
@ -13517,7 +13534,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* so enable debugging for the next modeset - and hope we catch
* the culprit.
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
@ -14697,7 +14714,12 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
if (INTEL_GEN(dev_priv) >= 11) {
if (IS_ELKHARTLAKE(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
icl_dsi_init(dev_priv);
} else if (INTEL_GEN(dev_priv) >= 11) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);

View file

@ -1723,12 +1723,6 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
}
}
struct link_config_limits {
int min_clock, max_clock;
int min_lane_count, max_lane_count;
int min_bpp, max_bpp;
};
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config)
{
@ -1791,7 +1785,7 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
}
/* Adjust link config limits based on compliance test requests. */
static void
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct link_config_limits *limits)
@ -2107,6 +2101,29 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return 0;
}
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
/*
* See:
* CEA-861-E - 5.1 Default Encoding Parameters
* VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
*/
return crtc_state->pipe_bpp != 18 &&
drm_default_rgb_quant_range(adjusted_mode) ==
HDMI_QUANTIZATION_RANGE_LIMITED;
} else {
return intel_conn_state->broadcast_rgb ==
INTEL_BROADCAST_RGB_LIMITED;
}
}
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@ -2175,20 +2192,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (ret < 0)
return ret;
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
/*
* See:
* CEA-861-E - 5.1 Default Encoding Parameters
* VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
*/
pipe_config->limited_color_range =
pipe_config->pipe_bpp != 18 &&
drm_default_rgb_quant_range(adjusted_mode) ==
HDMI_QUANTIZATION_RANGE_LIMITED;
} else {
pipe_config->limited_color_range =
intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
}
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
if (!pipe_config->dsc_params.compression_enable)
intel_link_compute_m_n(pipe_config->pipe_bpp,
@ -2348,7 +2353,7 @@ static void wait_panel_status(struct intel_dp *intel_dp,
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
pp_stat_reg, mask, value,
5000))
DRM_ERROR("Panel status timeout: status %08x control %08x\n",
@ -3937,7 +3942,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (port == PORT_A)
return;
if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port),
if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
DP_TP_STATUS_IDLE_DONE,
DP_TP_STATUS_IDLE_DONE,
1))
@ -7033,9 +7038,7 @@ intel_dp_drrs_init(struct intel_connector *connector,
return NULL;
}
downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode,
&connector->base);
downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
if (!downclock_mode) {
DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
return NULL;
@ -7057,7 +7060,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
struct drm_display_mode *scan;
enum pipe pipe = INVALID_PIPE;
intel_wakeref_t wakeref;
struct edid *edid;
@ -7110,26 +7112,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
intel_connector->edid = edid;
/* prefer fixed mode from EDID if available */
list_for_each_entry(scan, &connector->probed_modes, head) {
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
fixed_mode = drm_mode_duplicate(dev, scan);
downclock_mode = intel_dp_drrs_init(
intel_connector, fixed_mode);
break;
}
}
fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
if (fixed_mode)
downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
/* fallback to VBT if available for eDP */
if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
fixed_mode = drm_mode_duplicate(dev,
dev_priv->vbt.lfp_lvds_vbt_mode);
if (fixed_mode) {
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
connector->display_info.width_mm = fixed_mode->width_mm;
connector->display_info.height_mm = fixed_mode->height_mm;
}
}
if (!fixed_mode)
fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {

View file

@ -29,72 +29,108 @@
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
struct link_config_limits *limits)
{
struct drm_atomic_state *state = crtc_state->base.state;
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
void *port = connector->port;
bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_CONSTANT_N);
int bpp, slots = -EINVAL;
crtc_state->lane_count = limits->max_lane_count;
crtc_state->port_clock = limits->max_clock;
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
crtc_state->pipe_bpp = bpp;
crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
crtc_state->pipe_bpp);
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
port, crtc_state->pbn);
if (slots == -EDEADLK)
return slots;
if (slots >= 0)
break;
}
if (slots < 0) {
DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots);
return slots;
}
intel_link_compute_m_n(crtc_state->pipe_bpp,
crtc_state->lane_count,
adjusted_mode->crtc_clock,
crtc_state->port_clock,
&crtc_state->dp_m_n,
constant_n);
crtc_state->dp_m_n.tu = slots;
return 0;
}
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_connector *connector = conn_state->connector;
void *port = to_intel_connector(connector)->port;
struct drm_atomic_state *state = pipe_config->base.state;
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_crtc_state *old_crtc_state =
drm_atomic_get_old_crtc_state(state, crtc);
int bpp;
int lane_count, slots =
to_intel_crtc_state(old_crtc_state)->dp_m_n.tu;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int mst_pbn;
bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_CONSTANT_N);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
void *port = connector->port;
struct link_config_limits limits;
int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
bpp = 24;
if (intel_dp->compliance.test_data.bpc) {
bpp = intel_dp->compliance.test_data.bpc * 3;
DRM_DEBUG_KMS("Setting pipe bpp to %d\n",
bpp);
}
if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
pipe_config->has_audio =
drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port);
else
pipe_config->has_audio =
intel_conn_state->force_audio == HDMI_AUDIO_ON;
/*
* for MST we always configure max link bw - the spec doesn't
* seem to suggest we should do otherwise.
*/
lane_count = intel_dp_max_lane_count(intel_dp);
limits.min_clock =
limits.max_clock = intel_dp_max_link_rate(intel_dp);
pipe_config->lane_count = lane_count;
limits.min_lane_count =
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
pipe_config->pipe_bpp = bpp;
limits.min_bpp = 6 * 3;
limits.max_bpp = pipe_config->pipe_bpp;
pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port))
pipe_config->has_audio = true;
ret = intel_dp_mst_compute_link_config(encoder, pipe_config,
conn_state, &limits);
if (ret)
return ret;
mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
pipe_config->pbn = mst_pbn;
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, port,
mst_pbn);
if (slots < 0) {
DRM_DEBUG_KMS("failed finding vcpi slots:%d\n",
slots);
return slots;
}
intel_link_compute_m_n(bpp, lane_count,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n,
constant_n);
pipe_config->dp_m_n.tu = slots;
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
@ -117,7 +153,11 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
struct drm_crtc *new_crtc = new_conn_state->crtc;
struct drm_crtc_state *crtc_state;
struct drm_dp_mst_topology_mgr *mgr;
int ret = 0;
int ret;
ret = intel_digital_connector_atomic_check(connector, new_conn_state);
if (ret)
return ret;
if (!old_conn_state->crtc)
return 0;
@ -289,7 +329,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
DP_TP_STATUS(port),
DP_TP_STATUS_ACT_SENT,
DP_TP_STATUS_ACT_SENT,
@ -354,11 +394,13 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
static int intel_dp_mst_get_modes(struct drm_connector *connector)
@ -373,7 +415,6 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
int bpp = 24; /* MST uses fixed bpp */
int max_rate, mode_rate, max_lanes, max_link_clock;
if (drm_connector_is_unregistered(connector))
@ -386,7 +427,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
max_lanes = intel_dp_max_lane_count(intel_dp);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(mode->clock, bpp);
mode_rate = intel_dp_link_required(mode->clock, 18);
/* TODO - validate mode against available PBN for link */
if (mode->clock < 10000)
@ -487,6 +528,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
if (ret)
goto err;
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
drm_connector_attach_max_bpc_property(connector, 6, 12);
return connector;
err:


@ -341,7 +341,7 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
BXT_PORT_REF_DW3(phy),
GRC_DONE, GRC_DONE,
10))
@ -383,7 +383,8 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
* The flag should get set in 100us according to the HW team, but
* use 1ms due to occasional timeouts observed with that.
*/
if (intel_wait_for_register_fw(dev_priv, BXT_PORT_CL1CM_DW0(phy),
if (intel_wait_for_register_fw(&dev_priv->uncore,
BXT_PORT_CL1CM_DW0(phy),
PHY_RESERVED | PHY_POWER_GOOD,
PHY_POWER_GOOD,
1))


@ -960,7 +960,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(regs[id].ctl,
I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
DPLL_STATUS,
DPLL_LOCK(id),
DPLL_LOCK(id),
@ -1977,7 +1977,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
CNL_DPLL_ENABLE(id),
PLL_POWER_STATE,
PLL_POWER_STATE,
@ -2018,7 +2018,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for PLL lock status in DPLL_ENABLE. */
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
CNL_DPLL_ENABLE(id),
PLL_LOCK,
PLL_LOCK,
@ -2066,7 +2066,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
CNL_DPLL_ENABLE(id),
PLL_LOCK,
0,
@ -2088,7 +2088,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
CNL_DPLL_ENABLE(id),
PLL_POWER_STATE,
0,
@ -3050,8 +3050,8 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE,
PLL_POWER_STATE, 1))
if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
PLL_POWER_STATE, PLL_POWER_STATE, 1))
DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
}
@ -3066,8 +3066,8 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(enable_reg, val);
/* Timeout is actually 600us. */
if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, PLL_LOCK,
1))
if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
PLL_LOCK, PLL_LOCK, 1))
DRM_ERROR("PLL %d not locked\n", pll->info->id);
}
@ -3149,7 +3149,8 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(enable_reg, val);
/* Timeout is actually 1us. */
if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, 0, 1))
if (intel_wait_for_register(&dev_priv->uncore,
enable_reg, PLL_LOCK, 0, 1))
DRM_ERROR("PLL %d locked\n", pll->info->id);
/* DVFS post sequence would be here. See the comment above. */
@ -3162,8 +3163,8 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE, 0,
1))
if (intel_wait_for_register(&dev_priv->uncore,
enable_reg, PLL_POWER_STATE, 0, 1))
DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
}
@ -3245,6 +3246,18 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
.dump_hw_state = icl_dump_hw_state,
};
static const struct dpll_info ehl_plls[] = {
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
{ },
};
static const struct intel_dpll_mgr ehl_pll_mgr = {
.dpll_info = ehl_plls,
.get_dpll = icl_get_dpll,
.dump_hw_state = icl_dump_hw_state,
};
/**
* intel_shared_dpll_init - Initialize shared DPLLs
* @dev: drm device
@ -3258,7 +3271,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
if (INTEL_GEN(dev_priv) >= 11)
if (IS_ELKHARTLAKE(dev_priv))
dpll_mgr = &ehl_pll_mgr;
else if (INTEL_GEN(dev_priv) >= 11)
dpll_mgr = &icl_pll_mgr;
else if (IS_CANNONLAKE(dev_priv))
dpll_mgr = &cnl_pll_mgr;


@ -1659,7 +1659,7 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
enum intel_dpll_id pll_id);
struct intel_dpll_hw_state *state);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
int color_plane, unsigned int height);
@ -1909,6 +1909,16 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
void intel_csr_ucode_resume(struct drm_i915_private *);
/* intel_dp.c */
struct link_config_limits {
int min_clock, max_clock;
int min_lane_count, max_lane_count;
int min_bpp, max_bpp;
};
void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct link_config_limits *limits);
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t dp_reg, enum port port,
enum pipe *pipe);
@ -2154,10 +2164,13 @@ void intel_panel_update_backlight(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_i915_private *dev_priv,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector);
struct drm_display_mode *
intel_panel_edid_downclock_mode(struct intel_connector *connector,
const struct drm_display_mode *fixed_mode);
struct drm_display_mode *
intel_panel_edid_fixed_mode(struct intel_connector *connector);
struct drm_display_mode *
intel_panel_vbt_fixed_mode(struct intel_connector *connector);
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
int intel_backlight_device_register(struct intel_connector *connector);
@ -2267,18 +2280,24 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices);
static inline void
assert_rpm_device_not_suspended(struct drm_i915_private *i915)
assert_rpm_device_not_suspended(struct i915_runtime_pm *rpm)
{
WARN_ONCE(i915->runtime_pm.suspended,
WARN_ONCE(rpm->suspended,
"Device suspended during HW access\n");
}
static inline void
__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm)
{
assert_rpm_device_not_suspended(rpm);
WARN_ONCE(!atomic_read(&rpm->wakeref_count),
"RPM wakelock ref not held during HW access");
}
static inline void
assert_rpm_wakelock_held(struct drm_i915_private *i915)
{
assert_rpm_device_not_suspended(i915);
WARN_ONCE(!atomic_read(&i915->runtime_pm.wakeref_count),
"RPM wakelock ref not held during HW access");
__assert_rpm_wakelock_held(&i915->runtime_pm);
}
/**
@ -2372,7 +2391,6 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);


@ -189,7 +189,6 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
/* intel_dsi_vbt.c */
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);


@ -532,24 +532,6 @@ void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
msleep(msec);
}
int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
{
struct intel_connector *connector = intel_dsi->attached_connector;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
if (!mode)
return 0;
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(&connector->base, mode);
return 1;
}
#define ICL_PREPARE_CNT_MAX 0x7
#define ICL_CLK_ZERO_CNT_MAX 0xf
#define ICL_TRAIL_CNT_MAX 0x7


@ -255,21 +255,17 @@ static void __sprint_engine_name(char *name, const struct engine_info *info)
void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{
struct drm_i915_private *dev_priv = engine->i915;
i915_reg_t hwstam;
/*
* Though they added more rings on g4x/ilk, they did not add
* per-engine HWSTAM until gen6.
*/
if (INTEL_GEN(dev_priv) < 6 && engine->class != RENDER_CLASS)
if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
return;
hwstam = RING_HWSTAM(engine->mmio_base);
if (INTEL_GEN(dev_priv) >= 3)
I915_WRITE(hwstam, mask);
if (INTEL_GEN(engine->i915) >= 3)
ENGINE_WRITE(engine, RING_HWSTAM, mask);
else
I915_WRITE16(hwstam, mask);
ENGINE_WRITE16(engine, RING_HWSTAM, mask);
}
static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
@ -309,6 +305,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->id = id;
engine->mask = BIT(id);
engine->i915 = dev_priv;
engine->uncore = &dev_priv->uncore;
__sprint_engine_name(engine->name, info);
engine->hw_id = engine->guc_id = info->hw_id;
engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
@ -528,9 +525,7 @@ static int init_status_page(struct intel_engine_cs *engine)
return PTR_ERR(obj);
}
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
if (ret)
goto err;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
@ -581,7 +576,6 @@ int intel_engine_setup_common(struct intel_engine_cs *engine)
err = i915_timeline_init(engine->i915,
&engine->timeline,
engine->name,
engine->status_page.vma);
if (err)
goto err_hwsp;
@ -660,7 +654,7 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
return -ENOMEM;
if (i915_timeline_init(engine->i915,
&frame->timeline, "measure",
&frame->timeline,
engine->status_page.vma))
goto out_frame;
@ -790,50 +784,48 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_private *i915 = engine->i915;
u64 acthd;
if (INTEL_GEN(dev_priv) >= 8)
acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
RING_ACTHD_UDW(engine->mmio_base));
else if (INTEL_GEN(dev_priv) >= 4)
acthd = I915_READ(RING_ACTHD(engine->mmio_base));
if (INTEL_GEN(i915) >= 8)
acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
else if (INTEL_GEN(i915) >= 4)
acthd = ENGINE_READ(engine, RING_ACTHD);
else
acthd = I915_READ(ACTHD);
acthd = ENGINE_READ(engine, ACTHD);
return acthd;
}
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
u64 bbaddr;
if (INTEL_GEN(dev_priv) >= 8)
bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
RING_BBADDR_UDW(engine->mmio_base));
if (INTEL_GEN(engine->i915) >= 8)
bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
else
bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
bbaddr = ENGINE_READ(engine, RING_BBADDR);
return bbaddr;
}
int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_uncore *uncore = engine->uncore;
const u32 base = engine->mmio_base;
const i915_reg_t mode = RING_MI_MODE(base);
int err;
if (INTEL_GEN(dev_priv) < 3)
if (INTEL_GEN(engine->i915) < 3)
return -ENODEV;
GEM_TRACE("%s\n", engine->name);
I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
err = 0;
if (__intel_wait_for_register_fw(dev_priv,
if (__intel_wait_for_register_fw(uncore,
mode, MODE_IDLE, MODE_IDLE,
1000, 0,
NULL)) {
@ -842,19 +834,16 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
}
/* A final mmio read to let GPU writes be hopefully flushed to memory */
POSTING_READ_FW(mode);
intel_uncore_posting_read_fw(uncore, mode);
return err;
}
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
GEM_TRACE("%s\n", engine->name);
I915_WRITE_FW(RING_MI_MODE(engine->mmio_base),
_MASKED_BIT_DISABLE(STOP_RING));
ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
@ -891,6 +880,7 @@ static inline u32
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
int subslice, i915_reg_t reg)
{
struct intel_uncore *uncore = &dev_priv->uncore;
u32 mcr_slice_subslice_mask;
u32 mcr_slice_subslice_select;
u32 default_mcr_s_ss_select;
@ -912,33 +902,33 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);
fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ);
fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
fw_domains |= intel_uncore_forcewake_for_reg(uncore,
GEN8_MCR_SELECTOR,
FW_REG_READ | FW_REG_WRITE);
spin_lock_irq(&dev_priv->uncore.lock);
intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
spin_lock_irq(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw_domains);
mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
default_mcr_s_ss_select);
mcr &= ~mcr_slice_subslice_mask;
mcr |= mcr_slice_subslice_select;
I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
ret = I915_READ_FW(reg);
ret = intel_uncore_read_fw(uncore, reg);
mcr &= ~mcr_slice_subslice_mask;
mcr |= default_mcr_s_ss_select;
I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
spin_unlock_irq(&dev_priv->uncore.lock);
intel_uncore_forcewake_put__locked(uncore, fw_domains);
spin_unlock_irq(&uncore->lock);
return ret;
}
@ -948,6 +938,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base;
int slice;
int subslice;
@ -956,12 +947,14 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
switch (INTEL_GEN(dev_priv)) {
default:
instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
if (engine->id != RCS0)
break;
instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
instdone->sampler[slice][subslice] =
read_subslice_reg(dev_priv, slice, subslice,
@ -972,28 +965,33 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
}
break;
case 7:
instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
if (engine->id != RCS0)
break;
instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
instdone->sampler[0][0] =
intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
instdone->row[0][0] =
intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
break;
case 6:
case 5:
case 4:
instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
if (engine->id == RCS0)
/* HACK: Using the wrong struct member */
instdone->slice_common = I915_READ(GEN4_INSTDONE1);
instdone->slice_common =
intel_uncore_read(uncore, GEN4_INSTDONE1);
break;
case 3:
case 2:
instdone->instdone = I915_READ(GEN2_INSTDONE);
instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
break;
}
}
@ -1013,12 +1011,13 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
return true;
/* First check that no commands are left in the ring */
if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
(I915_READ_TAIL(engine) & TAIL_ADDR))
if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
(ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
idle = false;
/* No bit for gen2, so assume the CS parser is idle */
if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
if (INTEL_GEN(dev_priv) > 2 &&
!(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
idle = false;
intel_runtime_pm_put(dev_priv, wakeref);
@ -1334,24 +1333,25 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
u64 addr;
if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7))
drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
drm_printf(m, "\tRING_START: 0x%08x\n",
I915_READ(RING_START(engine->mmio_base)));
ENGINE_READ(engine, RING_START));
drm_printf(m, "\tRING_HEAD: 0x%08x\n",
I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
drm_printf(m, "\tRING_TAIL: 0x%08x\n",
I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
I915_READ(RING_CTL(engine->mmio_base)),
I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
ENGINE_READ(engine, RING_CTL),
ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
if (INTEL_GEN(engine->i915) > 2) {
drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
I915_READ(RING_MI_MODE(engine->mmio_base)),
I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
ENGINE_READ(engine, RING_MI_MODE),
ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
}
if (INTEL_GEN(dev_priv) >= 6) {
drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
drm_printf(m, "\tRING_IMR: %08x\n",
ENGINE_READ(engine, RING_IMR));
}
addr = intel_engine_get_active_head(engine);
@ -1361,22 +1361,21 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
if (INTEL_GEN(dev_priv) >= 8)
addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
RING_DMA_FADD_UDW(engine->mmio_base));
addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
else if (INTEL_GEN(dev_priv) >= 4)
addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
addr = ENGINE_READ(engine, RING_DMA_FADD);
else
addr = I915_READ(DMA_FADD_I8XX);
addr = ENGINE_READ(engine, DMA_FADD_I8XX);
drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
if (INTEL_GEN(dev_priv) >= 4) {
drm_printf(m, "\tIPEIR: 0x%08x\n",
I915_READ(RING_IPEIR(engine->mmio_base)));
ENGINE_READ(engine, RING_IPEIR));
drm_printf(m, "\tIPEHR: 0x%08x\n",
I915_READ(RING_IPEHR(engine->mmio_base)));
ENGINE_READ(engine, RING_IPEHR));
} else {
drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
if (HAS_EXECLISTS(dev_priv)) {
@ -1386,15 +1385,15 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
u8 read, write;
drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
I915_READ(RING_EXECLIST_STATUS_LO(engine)),
I915_READ(RING_EXECLIST_STATUS_HI(engine)));
ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
read = execlists->csb_head;
write = READ_ONCE(*execlists->csb_write);
drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? %s (%s)\n",
read, write,
GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))),
GEN8_CSB_WRITE_PTR(ENGINE_READ(engine, RING_CONTEXT_STATUS_PTR)),
yesno(test_bit(TASKLET_STATE_SCHED,
&engine->execlists.tasklet.state)),
enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
@ -1409,9 +1408,13 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n",
idx,
hws[idx * 2],
I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
ENGINE_READ_IDX(engine,
RING_CONTEXT_STATUS_BUF_LO,
idx),
hws[idx * 2 + 1],
I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
ENGINE_READ_IDX(engine,
RING_CONTEXT_STATUS_BUF_HI,
idx));
}
rcu_read_lock();
@ -1438,11 +1441,11 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
rcu_read_unlock();
} else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
I915_READ(RING_PP_DIR_BASE(engine)));
ENGINE_READ(engine, RING_PP_DIR_BASE));
drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
I915_READ(RING_PP_DIR_BASE_READ(engine)));
ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
I915_READ(RING_PP_DIR_DCLV(engine)));
ENGINE_READ(engine, RING_PP_DIR_DCLV));
}
}
@ -1689,8 +1692,7 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine)
static bool match_ring(struct i915_request *rq)
{
struct drm_i915_private *dev_priv = rq->i915;
u32 ring = I915_READ(RING_START(rq->engine->mmio_base));
u32 ring = ENGINE_READ(rq->engine, RING_START);
return ring == i915_ggtt_offset(rq->ring->vma);
}


@ -29,6 +29,7 @@ struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct intel_uncore;
struct intel_hw_status_page {
struct i915_vma *vma;
@ -250,6 +251,7 @@ struct intel_engine_execlists {
struct intel_engine_cs {
struct drm_i915_private *i915;
struct intel_uncore *uncore;
char name[INTEL_ENGINE_CS_MAX_NAME];
enum intel_engine_id id;


@ -108,7 +108,7 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
FBC_STATUS, FBC_STAT_COMPRESSING, 0,
10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");


@ -54,7 +54,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
for (i = 0; i < guc->send_regs.count; i++) {
fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
guc_send_reg(guc, i),
FW_REG_READ | FW_REG_WRITE);
}
@ -369,14 +369,14 @@ void intel_guc_init_params(struct intel_guc *guc)
* they are power context saved so it's ok to release forcewake
* when we are done here and take it again at xfer time.
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER);
I915_WRITE(SOFT_SCRATCH(0), 0);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER);
}
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
@ -398,6 +398,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_uncore *uncore = &dev_priv->uncore;
u32 status;
int i;
int ret;
@ -414,12 +415,12 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
*action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
mutex_lock(&guc->send_mutex);
intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);
intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);
for (i = 0; i < len; i++)
I915_WRITE(guc_send_reg(guc, i), action[i]);
intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);
POSTING_READ(guc_send_reg(guc, i - 1));
intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));
intel_guc_notify(guc);
@ -427,7 +428,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
* No GuC command should ever take longer than 10ms.
* Fast commands should still complete in 10us.
*/
ret = __intel_wait_for_register_fw(dev_priv,
ret = __intel_wait_for_register_fw(uncore,
guc_send_reg(guc, 0),
INTEL_GUC_MSG_TYPE_MASK,
INTEL_GUC_MSG_TYPE_RESPONSE <<
@ -454,7 +455,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
ret = INTEL_GUC_MSG_TO_DATA(status);
out:
intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
mutex_unlock(&guc->send_mutex);
return ret;
@ -484,17 +485,25 @@ void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
spin_unlock(&guc->irq_lock);
enable_rpm_wakeref_asserts(dev_priv);
intel_guc_to_host_process_recv_msg(guc, msg);
intel_guc_to_host_process_recv_msg(guc, &msg, 1);
}
void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg)
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
const u32 *payload, u32 len)
{
u32 msg;
if (unlikely(!len))
return -EPROTO;
/* Make sure to handle only enabled messages */
msg &= guc->msg_enabled_mask;
msg = payload[0] & guc->msg_enabled_mask;
if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
intel_guc_log_handle_flush_event(&guc->log);
return 0;
}
int intel_guc_sample_forcewake(struct intel_guc *guc)
@ -556,7 +565,7 @@ static int guc_sleep_state_action(struct intel_guc *guc,
if (ret)
return ret;
ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14),
ret = __intel_wait_for_register(&dev_priv->uncore, SOFT_SCRATCH(14),
INTEL_GUC_SLEEP_STATE_INVALID_MASK,
0, 0, 10, &status);
if (ret)


@ -165,7 +165,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
void intel_guc_to_host_event_handler(struct intel_guc *guc);
void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc);
void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
const u32 *payload, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);


@ -701,14 +701,15 @@ static void ct_process_request(struct intel_guc_ct *ct,
u32 action, u32 len, const u32 *payload)
{
struct intel_guc *guc = ct_to_guc(ct);
int ret;
CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);
switch (action) {
case INTEL_GUC_ACTION_DEFAULT:
if (unlikely(len < 1))
ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
if (unlikely(ret))
goto fail_unexpected;
intel_guc_to_host_process_recv_msg(guc, *payload);
break;
default:


@ -241,7 +241,7 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
guc_prepare_xfer(guc);
@ -254,7 +254,7 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
ret = guc_xfer_ucode(guc, vma);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
return ret;
}


@ -118,11 +118,11 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
* and break the hang. This should work on
* all but the second generation chipsets.
*/
tmp = I915_READ_CTL(engine);
tmp = ENGINE_READ(engine, RING_CTL);
if (tmp & RING_WAIT) {
i915_handle_error(dev_priv, engine->mask, 0,
"stuck wait on %s", engine->name);
I915_WRITE_CTL(engine, tmp);
ENGINE_WRITE(engine, RING_CTL, tmp);
return ENGINE_WAIT_KICK;
}
@ -270,7 +270,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
* periodically arm the mmio checker to see if we are triggering
* any invalid access.
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
for_each_engine(engine, dev_priv, id) {
struct hangcheck hc;


@ -225,7 +225,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
}
/* Wait for the keys to load (500us) */
ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS,
ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
10, 1, &val);
if (ret)
@ -243,7 +243,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
I915_WRITE(HDCP_SHA_TEXT, sha_text);
if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
DRM_ERROR("Timed out waiting for SHA1 ready\n");
return -ETIMEDOUT;
@ -474,7 +474,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
/* Tell the HW we're done with the hash and wait for it to ACK */
I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
HDCP_SHA1_COMPLETE,
HDCP_SHA1_COMPLETE, 1)) {
DRM_ERROR("Timed out waiting for SHA1 complete\n");
@ -604,7 +604,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
HDCP_STATUS_AN_READY,
HDCP_STATUS_AN_READY, 1)) {
DRM_ERROR("Timed out waiting for An\n");
@ -685,7 +685,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
}
/* Wait for encryption confirmation */
if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
HDCP_STATUS_ENC, HDCP_STATUS_ENC,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Timed out waiting for encryption\n");
@ -717,7 +717,8 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
hdcp->hdcp_encrypted = false;
I915_WRITE(PORT_HDCP_CONF(port), 0);
if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
if (intel_wait_for_register(&dev_priv->uncore,
PORT_HDCP_STATUS(port), ~0, 0,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
@ -1477,7 +1478,7 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
CTL_LINK_ENCRYPTION_REQ);
}
ret = intel_wait_for_register(dev_priv, HDCP2_STATUS_DDI(port),
ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
LINK_ENCRYPTION_STATUS,
LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
@ -1498,7 +1499,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
I915_WRITE(HDCP2_CTL_DDI(port),
I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
ret = intel_wait_for_register(dev_priv, HDCP2_STATUS_DDI(port),
ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
LINK_ENCRYPTION_STATUS, 0x0,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)


@ -79,7 +79,7 @@ int intel_huc_auth(struct intel_huc *huc)
}
/* Check authentication status, it should be done by now */
ret = __intel_wait_for_register(i915,
ret = __intel_wait_for_register(&i915->uncore,
HUC_STATUS2,
HUC_FW_VERIFIED,
HUC_FW_VERIFIED,


@ -106,41 +106,46 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
{
struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
struct drm_i915_private *dev_priv = huc_to_i915(huc);
struct intel_uncore *uncore = &dev_priv->uncore;
unsigned long offset = 0;
u32 size;
int ret;
GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* Set the source address for the uCode */
offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) +
huc_fw->header_offset;
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
intel_uncore_write(uncore, DMA_ADDR_0_LOW,
lower_32_bits(offset));
intel_uncore_write(uncore, DMA_ADDR_0_HIGH,
upper_32_bits(offset) & 0xFFFF);
/* Hardware doesn't look at destination address for HuC. Set it to 0,
/*
* Hardware doesn't look at destination address for HuC. Set it to 0,
* but still program the correct address space.
*/
I915_WRITE(DMA_ADDR_1_LOW, 0);
I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0);
intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
size = huc_fw->header_size + huc_fw->ucode_size;
I915_WRITE(DMA_COPY_SIZE, size);
intel_uncore_write(uncore, DMA_COPY_SIZE, size);
/* Start the DMA */
I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
intel_uncore_write(uncore, DMA_CTRL,
_MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
/* Wait for DMA to finish */
ret = intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, 100);
ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
/* Disable the bits once DMA is over */
I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
intel_uncore_write(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
return ret;
}


@ -348,7 +348,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
I915_WRITE_FW(GMBUS4, irq_enable);
ret = intel_wait_for_register_fw(dev_priv,
ret = intel_wait_for_register_fw(&dev_priv->uncore,
GMBUS2, GMBUS_ACTIVE, 0,
10);


@ -1248,6 +1248,30 @@ static void execlists_context_destroy(struct kref *kref)
intel_context_free(ce);
}
static int __context_pin(struct i915_vma *vma)
{
unsigned int flags;
int err;
flags = PIN_GLOBAL | PIN_HIGH;
flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
err = i915_vma_pin(vma, 0, 0, flags);
if (err)
return err;
vma->obj->pin_global++;
vma->obj->mm.dirty = true;
return 0;
}
static void __context_unpin(struct i915_vma *vma)
{
vma->obj->pin_global--;
__i915_vma_unpin(vma);
}
static void execlists_context_unpin(struct intel_context *ce)
{
struct intel_engine_cs *engine;
@ -1276,31 +1300,8 @@ static void execlists_context_unpin(struct intel_context *ce)
intel_ring_unpin(ce->ring);
ce->state->obj->pin_global--;
i915_gem_object_unpin_map(ce->state->obj);
i915_vma_unpin(ce->state);
}
static int __context_pin(struct i915_vma *vma)
{
unsigned int flags;
int err;
/*
* Clear this page out of any CPU caches for coherent swap-in/out.
* We only want to do this on the first bind so that we do not stall
* on an active context (which by nature is already on the GPU).
*/
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
err = i915_gem_object_set_to_wc_domain(vma->obj, true);
if (err)
return err;
}
flags = PIN_GLOBAL | PIN_HIGH;
flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
return i915_vma_pin(vma, 0, 0, flags);
__context_unpin(ce->state);
}
static void
@ -1361,7 +1362,6 @@ __execlists_context_pin(struct intel_context *ce,
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
__execlists_update_reg_state(ce, engine);
ce->state->obj->pin_global++;
return 0;
unpin_ring:
@ -1369,7 +1369,7 @@ __execlists_context_pin(struct intel_context *ce,
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
unpin_vma:
__i915_vma_unpin(ce->state);
__context_unpin(ce->state);
err:
return ret;
}
@ -2074,16 +2074,14 @@ static int gen8_emit_bb_start(struct i915_request *rq,
static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine,
~(engine->irq_enable_mask | engine->irq_keep_mask));
POSTING_READ_FW(RING_IMR(engine->mmio_base));
ENGINE_WRITE(engine, RING_IMR,
~(engine->irq_enable_mask | engine->irq_keep_mask));
ENGINE_POSTING_READ(engine, RING_IMR);
}
static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
}
static int gen8_emit_flush(struct i915_request *request, u32 mode)
@ -2288,7 +2286,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
dev_priv = engine->i915;
if (engine->buffer) {
WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
}
if (engine->cleanup)
@ -2315,8 +2313,9 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
engine->park = NULL;
engine->unpark = NULL;
engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
engine->flags |= I915_ENGINE_SUPPORTS_STATS;
if (!intel_vgpu_active(engine->i915))
engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
if (engine->preempt_context)
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
}
@ -2400,6 +2399,7 @@ static int logical_ring_init(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_engine_execlists * const execlists = &engine->execlists;
u32 base = engine->mmio_base;
int ret;
ret = intel_engine_init_common(engine);
@ -2409,13 +2409,13 @@ static int logical_ring_init(struct intel_engine_cs *engine)
intel_engine_init_workarounds(engine);
if (HAS_LOGICAL_RING_ELSQ(i915)) {
execlists->submit_reg = i915->regs +
i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
execlists->ctrl_reg = i915->regs +
i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine));
execlists->submit_reg = i915->uncore.regs +
i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
execlists->ctrl_reg = i915->uncore.regs +
i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
} else {
execlists->submit_reg = i915->regs +
i915_mmio_reg_offset(RING_ELSP(engine));
execlists->submit_reg = i915->uncore.regs +
i915_mmio_reg_offset(RING_ELSP(base));
}
execlists->preempt_complete_status = ~0u;
@ -2658,7 +2658,7 @@ static void execlists_init_reg_state(u32 *regs,
regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
MI_LRI_FORCE_POSTED;
CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base),
_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
if (INTEL_GEN(engine->i915) < 11) {
@ -2751,19 +2751,12 @@ populate_lr_context(struct intel_context *ce,
u32 *regs;
int ret;
ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
if (ret) {
DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
return ret;
}
vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
return ret;
}
ctx_obj->mm.dirty = true;
if (engine->default_state) {
/*
@ -2798,14 +2791,21 @@ populate_lr_context(struct intel_context *ce,
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
ret = 0;
err_unpin_ctx:
__i915_gem_object_flush_map(ctx_obj,
LRC_HEADER_PAGES * PAGE_SIZE,
engine->context_size);
i915_gem_object_unpin_map(ctx_obj);
return ret;
}
static struct i915_timeline *get_timeline(struct i915_gem_context *ctx)
{
return i915_timeline_create(ctx->i915, ctx->name, NULL);
if (ctx->timeline)
return i915_timeline_get(ctx->timeline);
else
return i915_timeline_create(ctx->i915, NULL);
}
static int execlists_context_deferred_alloc(struct intel_context *ce,


@ -28,20 +28,20 @@
#include "i915_gem_context.h"
/* Execlists regs */
#define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230)
#define RING_EXECLIST_STATUS_LO(engine) _MMIO((engine)->mmio_base + 0x234)
#define RING_EXECLIST_STATUS_HI(engine) _MMIO((engine)->mmio_base + 0x234 + 4)
#define RING_CONTEXT_CONTROL(engine) _MMIO((engine)->mmio_base + 0x244)
#define RING_ELSP(base) _MMIO((base) + 0x230)
#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234)
#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4)
#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2)
#define RING_CONTEXT_STATUS_BUF_BASE(engine) _MMIO((engine)->mmio_base + 0x370)
#define RING_CONTEXT_STATUS_BUF_LO(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8)
#define RING_CONTEXT_STATUS_BUF_HI(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(engine) _MMIO((engine)->mmio_base + 0x3a0)
#define RING_EXECLIST_SQ_CONTENTS(engine) _MMIO((engine)->mmio_base + 0x510)
#define RING_EXECLIST_CONTROL(engine) _MMIO((engine)->mmio_base + 0x550)
#define RING_CONTEXT_STATUS_BUF_BASE(base) _MMIO((base) + 0x370)
#define RING_CONTEXT_STATUS_BUF_LO(base, i) _MMIO((base) + 0x370 + (i) * 8)
#define RING_CONTEXT_STATUS_BUF_HI(base, i) _MMIO((base) + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
#define EL_CTRL_LOAD (1 << 0)
/* The docs specify that the write pointer wraps around after 5h, "After status


@ -311,7 +311,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
POSTING_READ(lvds_encoder->reg);
if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
if (intel_wait_for_register(&dev_priv->uncore,
PP_STATUS(0), PP_ON, PP_ON, 5000))
DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(pipe_config, conn_state);
@ -325,7 +326,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, 0, 1000))
if (intel_wait_for_register(&dev_priv->uncore,
PP_STATUS(0), PP_ON, 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
@ -810,7 +812,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
struct edid *edid;
@ -949,30 +950,14 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
}
intel_connector->edid = edid;
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
DRM_DEBUG_KMS("using preferred mode from EDID: ");
drm_mode_debug_printmodeline(scan);
fixed_mode = drm_mode_duplicate(dev, scan);
if (fixed_mode)
goto out;
}
}
fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
if (fixed_mode)
goto out;
/* Failed to get EDID, what about VBT? */
if (dev_priv->vbt.lfp_lvds_vbt_mode) {
DRM_DEBUG_KMS("using mode from VBT: ");
drm_mode_debug_printmodeline(dev_priv->vbt.lfp_lvds_vbt_mode);
fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
if (fixed_mode) {
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
connector->display_info.width_mm = fixed_mode->width_mm;
connector->display_info.height_mm = fixed_mode->height_mm;
goto out;
}
}
fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
if (fixed_mode)
goto out;
/*
* If we didn't get EDID, try checking if the panel is already turned


@ -46,27 +46,26 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
/**
* intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
* @dev_priv: i915 device instance
* @fixed_mode : panel native mode
* @connector: LVDS/eDP connector
*
* Return downclock_avail
* Find the reduced downclock for LVDS/eDP in EDID.
*/
struct drm_display_mode *
intel_find_panel_downclock(struct drm_i915_private *dev_priv,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector)
static bool is_downclock_mode(const struct drm_display_mode *downclock_mode,
const struct drm_display_mode *fixed_mode)
{
struct drm_display_mode *scan, *tmp_mode;
int temp_downclock;
return drm_mode_match(downclock_mode, fixed_mode,
DRM_MODE_MATCH_TIMINGS |
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS) &&
downclock_mode->clock < fixed_mode->clock;
}
temp_downclock = fixed_mode->clock;
tmp_mode = NULL;
struct drm_display_mode *
intel_panel_edid_downclock_mode(struct intel_connector *connector,
const struct drm_display_mode *fixed_mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
const struct drm_display_mode *scan, *best_mode = NULL;
struct drm_display_mode *downclock_mode;
int best_clock = fixed_mode->clock;
list_for_each_entry(scan, &connector->probed_modes, head) {
list_for_each_entry(scan, &connector->base.probed_modes, head) {
/*
* If one mode has the same resolution with the fixed_panel
* mode while they have the different refresh rate, it means
@ -74,29 +73,98 @@ intel_find_panel_downclock(struct drm_i915_private *dev_priv,
* case we can set the different FPx0/1 to dynamically select
* between low and high frequency.
*/
if (scan->hdisplay == fixed_mode->hdisplay &&
scan->hsync_start == fixed_mode->hsync_start &&
scan->hsync_end == fixed_mode->hsync_end &&
scan->htotal == fixed_mode->htotal &&
scan->vdisplay == fixed_mode->vdisplay &&
scan->vsync_start == fixed_mode->vsync_start &&
scan->vsync_end == fixed_mode->vsync_end &&
scan->vtotal == fixed_mode->vtotal) {
if (scan->clock < temp_downclock) {
/*
* The downclock is already found. But we
* expect to find the lower downclock.
*/
temp_downclock = scan->clock;
tmp_mode = scan;
}
if (is_downclock_mode(scan, fixed_mode) &&
scan->clock < best_clock) {
/*
* The downclock is already found. But we
* expect to find the lower downclock.
*/
best_clock = scan->clock;
best_mode = scan;
}
}
if (temp_downclock < fixed_mode->clock)
return drm_mode_duplicate(&dev_priv->drm, tmp_mode);
else
if (!best_mode)
return NULL;
downclock_mode = drm_mode_duplicate(&dev_priv->drm, best_mode);
if (!downclock_mode)
return NULL;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using downclock mode from EDID: ",
connector->base.base.id, connector->base.name);
drm_mode_debug_printmodeline(downclock_mode);
return downclock_mode;
}
struct drm_display_mode *
intel_panel_edid_fixed_mode(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
const struct drm_display_mode *scan;
struct drm_display_mode *fixed_mode;
if (list_empty(&connector->base.probed_modes))
return NULL;
/* prefer fixed mode from EDID if available */
list_for_each_entry(scan, &connector->base.probed_modes, head) {
if ((scan->type & DRM_MODE_TYPE_PREFERRED) == 0)
continue;
fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan);
if (!fixed_mode)
return NULL;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using preferred mode from EDID: ",
connector->base.base.id, connector->base.name);
drm_mode_debug_printmodeline(fixed_mode);
return fixed_mode;
}
scan = list_first_entry(&connector->base.probed_modes,
typeof(*scan), head);
fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan);
if (!fixed_mode)
return NULL;
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using first mode from EDID: ",
connector->base.base.id, connector->base.name);
drm_mode_debug_printmodeline(fixed_mode);
return fixed_mode;
}
struct drm_display_mode *
intel_panel_vbt_fixed_mode(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_display_info *info = &connector->base.display_info;
struct drm_display_mode *fixed_mode;
if (!dev_priv->vbt.lfp_lvds_vbt_mode)
return NULL;
fixed_mode = drm_mode_duplicate(&dev_priv->drm,
dev_priv->vbt.lfp_lvds_vbt_mode);
if (!fixed_mode)
return NULL;
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using mode from VBT: ",
connector->base.base.id, connector->base.name);
drm_mode_debug_printmodeline(fixed_mode);
info->width_mm = fixed_mode->width_mm;
info->height_mm = fixed_mode->height_mm;
return fixed_mode;
}
/* adjusted_mode has been preset to be the panel's fixed mode */


@ -3924,12 +3924,43 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
}
static unsigned int skl_cursor_allocation(int num_active)
{
if (num_active == 1)
return 32;
static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
int width, const struct drm_format_info *format,
u64 modifier, unsigned int rotation,
u32 plane_pixel_rate, struct skl_wm_params *wp,
int color_plane);
static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
int level,
const struct skl_wm_params *wp,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */);
return 8;
static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
int num_active)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level wm = {};
int ret, min_ddb_alloc = 0;
struct skl_wm_params wp;
ret = skl_compute_wm_params(crtc_state, 256,
drm_format_info(DRM_FORMAT_ARGB8888),
DRM_FORMAT_MOD_LINEAR,
DRM_MODE_ROTATE_0,
crtc_state->pixel_rate, &wp, 0);
WARN_ON(ret);
for (level = 0; level <= max_level; level++) {
skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
if (wm.min_ddb_alloc == U16_MAX)
break;
min_ddb_alloc = wm.min_ddb_alloc;
}
return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
}
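A brief worked example of the new return value (hypothetical numbers, not taken from the patch):
/*
* Illustrative only: with wp computed for the 256-wide ARGB8888 cursor,
* suppose min_ddb_alloc comes out as 8, 12 and 20 blocks for levels 0..2
* and U16_MAX at level 3. The loop stops at level 3 and keeps 20, so a
* single-pipe config still gets max(32, 20) = 32 blocks, while a
* multi-pipe config gets max(8, 20) = 20.
*/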
static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
@ -4313,7 +4344,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
struct skl_plane_wm *wm;
u16 alloc_size, start = 0;
u16 total[I915_MAX_PLANES] = {};
u16 uv_total[I915_MAX_PLANES] = {};
@ -4354,7 +4384,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
/* Allocate fixed number of blocks for cursor. */
total[PLANE_CURSOR] = skl_cursor_allocation(num_active);
total[PLANE_CURSOR] = skl_cursor_allocation(cstate, num_active);
alloc_size -= total[PLANE_CURSOR];
cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
alloc->end - total[PLANE_CURSOR];
@ -4370,15 +4400,23 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
blocks = 0;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
if (plane_id == PLANE_CURSOR)
continue;
const struct skl_plane_wm *wm =
&cstate->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR) {
if (WARN_ON(wm->wm[level].min_ddb_alloc >
total[PLANE_CURSOR])) {
blocks = U32_MAX;
break;
}
continue;
}
wm = &cstate->wm.skl.optimal.planes[plane_id];
blocks += wm->wm[level].min_ddb_alloc;
blocks += wm->uv_wm[level].min_ddb_alloc;
}
if (blocks < alloc_size) {
if (blocks <= alloc_size) {
alloc_size -= blocks;
break;
}
@ -4397,6 +4435,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* proportional to its relative data rate.
*/
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
const struct skl_plane_wm *wm =
&cstate->wm.skl.optimal.planes[plane_id];
u64 rate;
u16 extra;
@ -4410,8 +4450,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
if (total_data_rate == 0)
break;
wm = &cstate->wm.skl.optimal.planes[plane_id];
rate = plane_data_rate[plane_id];
extra = min_t(u16, alloc_size,
DIV64_U64_ROUND_UP(alloc_size * rate,
@ -4436,14 +4474,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
/* Set the actual DDB start/end points for each plane */
start = alloc->start;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
struct skl_ddb_entry *plane_alloc, *uv_plane_alloc;
struct skl_ddb_entry *plane_alloc =
&cstate->wm.skl.plane_ddb_y[plane_id];
struct skl_ddb_entry *uv_plane_alloc =
&cstate->wm.skl.plane_ddb_uv[plane_id];
if (plane_id == PLANE_CURSOR)
continue;
plane_alloc = &cstate->wm.skl.plane_ddb_y[plane_id];
uv_plane_alloc = &cstate->wm.skl.plane_ddb_uv[plane_id];
/* Gen11+ uses a separate plane for UV watermarks */
WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
@ -4469,8 +4507,24 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
*/
for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
wm = &cstate->wm.skl.optimal.planes[plane_id];
memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
struct skl_plane_wm *wm =
&cstate->wm.skl.optimal.planes[plane_id];
/*
* We only disable the watermarks for each plane if
* they exceed the ddb allocation of said plane. This
* is done so that we don't end up touching cursor
* watermarks needlessly when some other plane reduces
* our max possible watermark level.
*
* Bspec has this to say about the PLANE_WM enable bit:
* "All the watermarks at this level for all enabled
* planes must be enabled before the level will be used."
* So this is actually safe to do.
*/
if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
/*
* Wa_1408961008:icl
@ -4490,7 +4544,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* don't have enough DDB blocks for it.
*/
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
wm = &cstate->wm.skl.optimal.planes[plane_id];
struct skl_plane_wm *wm =
&cstate->wm.skl.optimal.planes[plane_id];
if (wm->trans_wm.plane_res_b >= total[plane_id])
memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
}
@ -4584,57 +4640,45 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
}
static int
skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
struct skl_wm_params *wp, int color_plane)
skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
int width, const struct drm_format_info *format,
u64 modifier, unsigned int rotation,
u32 plane_pixel_rate, struct skl_wm_params *wp,
int color_plane)
{
struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_plane_state *pstate = &intel_pstate->base;
const struct drm_framebuffer *fb = pstate->fb;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 interm_pbpl;
/* only planar formats have two planes */
if (color_plane == 1 && !is_planar_yuv_format(fb->format->format)) {
if (color_plane == 1 && !is_planar_yuv_format(format->format)) {
DRM_DEBUG_KMS("Non-planar formats have a single plane\n");
return -EINVAL;
}
wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
wp->is_planar = is_planar_yuv_format(fb->format->format);
if (plane->id == PLANE_CURSOR) {
wp->width = intel_pstate->base.crtc_w;
} else {
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
}
wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
modifier == I915_FORMAT_MOD_Yf_TILED ||
modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
wp->is_planar = is_planar_yuv_format(format->format);
wp->width = width;
if (color_plane == 1 && wp->is_planar)
wp->width /= 2;
wp->cpp = fb->format->cpp[color_plane];
wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
intel_pstate);
wp->cpp = format->cpp[color_plane];
wp->plane_pixel_rate = plane_pixel_rate;
if (INTEL_GEN(dev_priv) >= 11 &&
fb->modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
wp->dbuf_block_size = 256;
else
wp->dbuf_block_size = 512;
if (drm_rotation_90_or_270(pstate->rotation)) {
if (drm_rotation_90_or_270(rotation)) {
switch (wp->cpp) {
case 1:
wp->y_min_scanlines = 16;
@ -4679,12 +4723,40 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
wp->plane_blocks_per_line);
wp->linetime_us = fixed16_to_u32_round_up(
intel_get_linetime_us(cstate));
intel_get_linetime_us(crtc_state));
return 0;
}
static int
skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
struct skl_wm_params *wp, int color_plane)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
const struct drm_framebuffer *fb = plane_state->base.fb;
int width;
if (plane->id == PLANE_CURSOR) {
width = plane_state->base.crtc_w;
} else {
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
width = drm_rect_width(&plane_state->base.src) >> 16;
}
return skl_compute_wm_params(crtc_state, width,
fb->format, fb->modifier,
plane_state->base.rotation,
skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
wp, color_plane);
}
static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
{
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@ -4695,14 +4767,12 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
}
static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
int level,
const struct skl_wm_params *wp,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
struct drm_i915_private *dev_priv =
to_i915(intel_pstate->base.plane->dev);
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
u32 latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
@ -4821,19 +4891,17 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
static void
skl_compute_wm_levels(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
const struct skl_wm_params *wm_params,
struct skl_wm_level *levels)
{
struct drm_i915_private *dev_priv =
to_i915(intel_pstate->base.plane->dev);
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level *result_prev = &levels[0];
for (level = 0; level <= max_level; level++) {
struct skl_wm_level *result = &levels[level];
skl_compute_plane_wm(cstate, intel_pstate, level, wm_params,
skl_compute_plane_wm(cstate, level, wm_params,
result_prev, result);
result_prev = result;
@ -4930,7 +4998,7 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->wm);
skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
skl_compute_transition_wm(crtc_state, &wm_params, wm);
return 0;
@ -4952,13 +5020,12 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->uv_wm);
skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);
return 0;
}
static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
struct intel_crtc_state *crtc_state,
static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
@ -4984,8 +5051,7 @@ static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
return 0;
}
static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
struct intel_crtc_state *crtc_state,
static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
@ -5022,10 +5088,10 @@ static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
return 0;
}
static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
struct skl_pipe_wm *pipe_wm)
static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
struct drm_crtc_state *crtc_state = &cstate->base;
struct drm_plane *plane;
const struct drm_plane_state *pstate;
@ -5042,11 +5108,9 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
to_intel_plane_state(pstate);
if (INTEL_GEN(dev_priv) >= 11)
ret = icl_build_plane_wm(pipe_wm,
cstate, intel_pstate);
ret = icl_build_plane_wm(cstate, intel_pstate);
else
ret = skl_build_plane_wm(pipe_wm,
cstate, intel_pstate);
ret = skl_build_plane_wm(cstate, intel_pstate);
if (ret)
return ret;
}
@ -5201,23 +5265,6 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
return false;
}
static int skl_update_pipe_wm(struct intel_crtc_state *cstate,
const struct skl_pipe_wm *old_pipe_wm,
struct skl_pipe_wm *pipe_wm, /* out */
bool *changed /* out */)
{
struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
int ret;
ret = skl_build_pipe_wm(cstate, pipe_wm);
if (ret)
return ret;
*changed = !skl_pipe_wm_equals(crtc, old_pipe_wm, pipe_wm);
return 0;
}
static u32
pipes_modified(struct intel_atomic_state *state)
{
@ -5556,10 +5603,9 @@ static int
skl_compute_wm(struct intel_atomic_state *state)
{
struct intel_crtc *crtc;
struct intel_crtc_state *cstate;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc_state *old_crtc_state;
struct skl_ddb_values *results = &state->wm_results;
struct skl_pipe_wm *pipe_wm;
bool changed = false;
int ret, i;
@ -5577,12 +5623,8 @@ skl_compute_wm(struct intel_atomic_state *state)
* pipe allocations had to change.
*/
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
cstate, i) {
const struct skl_pipe_wm *old_pipe_wm =
&old_crtc_state->wm.skl.optimal;
pipe_wm = &cstate->wm.skl.optimal;
ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
new_crtc_state, i) {
ret = skl_build_pipe_wm(new_crtc_state);
if (ret)
return ret;
@ -5590,7 +5632,9 @@ skl_compute_wm(struct intel_atomic_state *state)
if (ret)
return ret;
if (changed)
if (!skl_pipe_wm_equals(crtc,
&old_crtc_state->wm.skl.optimal,
&new_crtc_state->wm.skl.optimal))
results->dirty_pipes |= drm_crtc_mask(&crtc->base);
}
@ -6737,9 +6781,9 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
* punit into committing the voltage change) as that takes a lot less
* power than the render powerwell.
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_MEDIA);
err = valleyview_set_rps(dev_priv, val);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_MEDIA);
if (err)
DRM_ERROR("Failed to set RPS for idle\n");
@ -6889,11 +6933,11 @@ static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
{
/* We're doing forcewake before disabling RC6,
* This is what the BIOS expects when going into suspend */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
I915_WRITE(GEN6_RC_CONTROL, 0);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
@ -7052,7 +7096,7 @@ static void reset_rps(struct drm_i915_private *dev_priv,
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* Program defaults and thresholds for RPS */
if (IS_GEN(dev_priv, 9))
@ -7070,7 +7114,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
* RP_INTERRUPT_LIMITS & RPNSWREQ registers */
reset_rps(dev_priv, gen6_set_rps);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
@ -7084,7 +7128,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
/* 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have. */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@ -7161,7 +7205,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN9_PG_ENABLE,
GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
@ -7174,7 +7218,7 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
/* 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have. */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@ -7195,14 +7239,14 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
GEN7_RC_CTL_TO_MODE |
GEN6_RC_CTL_RC6_ENABLE);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* 1 Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RPNSWREQ,
@ -7235,7 +7279,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
reset_rps(dev_priv, gen6_set_rps);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
@ -7255,7 +7299,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@ -7303,7 +7347,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen6_enable_rps(struct drm_i915_private *dev_priv)
@ -7314,7 +7358,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
* Perhaps there might be some value in exposing these to
* userspace...
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* Power down if completely idle for over 50ms */
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
@ -7322,7 +7366,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
reset_rps(dev_priv, gen6_set_rps);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
@ -7745,7 +7789,7 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
/* 1a & 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have. */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@ -7777,14 +7821,14 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
rc6_mode = GEN7_RC_CTL_TO_MODE;
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
u32 val;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* 1: Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
@ -7819,7 +7863,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
reset_rps(dev_priv, valleyview_set_rps);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
@ -7837,7 +7881,7 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@ -7862,14 +7906,14 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_CONTROL,
GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
u32 val;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
@ -7903,7 +7947,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
reset_rps(dev_priv, valleyview_set_rps);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static unsigned long intel_pxfreq(u32 vidfreq)
@ -8450,22 +8494,6 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
pm_runtime_put(&dev_priv->drm.pdev->dev);
}
/**
* intel_suspend_gt_powersave - suspend PM work and helper threads
* @dev_priv: i915 device
*
* We don't want to disable RC6 or other features here, we just want
* to make sure any work we've queued has finished and won't bother
* us while we're suspended.
*/
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
{
if (INTEL_GEN(dev_priv) < 6)
return;
/* gen6_rps_idle() will be called later to disable interrupts */
}
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
{
dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
@ -9659,7 +9687,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
if (__intel_wait_for_register_fw(dev_priv,
if (__intel_wait_for_register_fw(&dev_priv->uncore,
GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
500, 0, NULL)) {
DRM_ERROR("timeout waiting for pcode read (from mbox %x) to finish for %ps\n",
@ -9707,7 +9735,7 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
if (__intel_wait_for_register_fw(dev_priv,
if (__intel_wait_for_register_fw(&dev_priv->uncore,
GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
fast_timeout_us, slow_timeout_ms,
NULL)) {
@ -9931,6 +9959,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
const i915_reg_t reg)
{
struct intel_uncore *uncore = &dev_priv->uncore;
u64 time_hw, prev_hw, overflow_hw;
unsigned int fw_domains;
unsigned long flags;
@ -9952,10 +9981,10 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
return 0;
fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
spin_lock_irqsave(&uncore->lock, flags);
intel_uncore_forcewake_get__locked(uncore, fw_domains);
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@ -9974,7 +10003,7 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
}
overflow_hw = BIT_ULL(32);
time_hw = I915_READ_FW(reg);
time_hw = intel_uncore_read_fw(uncore, reg);
}
/*
@ -9996,8 +10025,8 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
intel_uncore_forcewake_put__locked(uncore, fw_domains);
spin_unlock_irqrestore(&uncore->lock, flags);
return mul_u64_u32_div(time_hw, mul, div);
}

View file

@ -834,8 +834,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
}
/* Wait till PSR is idle */
if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0,
2000))
if (intel_wait_for_register(&dev_priv->uncore,
psr_status, psr_status_mask, 0, 2000))
DRM_ERROR("Timed out waiting PSR idle state\n");
/* Disable PSR on Sink */
@ -956,7 +956,7 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
* defensive enough to cover everything.
*/
return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS,
return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS,
EDP_PSR_STATUS_STATE_MASK,
EDP_PSR_STATUS_STATE_IDLE, 2, 50,
out_value);
@ -981,7 +981,7 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->psr.lock);
err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50);
if (err)
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

View file

@ -575,19 +575,19 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
static void flush_cs_tlb(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
i915_reg_t instpm = RING_INSTPM(engine->mmio_base);
if (!IS_GEN_RANGE(dev_priv, 6, 7))
return;
/* ring should be idle before issuing a sync flush */
WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
I915_WRITE(instpm,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
INSTPM_SYNC_FLUSH));
if (intel_wait_for_register(dev_priv,
instpm, INSTPM_SYNC_FLUSH, 0,
ENGINE_WRITE(engine, RING_INSTPM,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
INSTPM_SYNC_FLUSH));
if (intel_wait_for_register(engine->uncore,
RING_INSTPM(engine->mmio_base),
INSTPM_SYNC_FLUSH, 0,
1000))
DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
engine->name);
@ -606,32 +606,36 @@ static bool stop_ring(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
if (INTEL_GEN(dev_priv) > 2) {
I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
if (intel_wait_for_register(dev_priv,
ENGINE_WRITE(engine,
RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
if (intel_wait_for_register(engine->uncore,
RING_MI_MODE(engine->mmio_base),
MODE_IDLE,
MODE_IDLE,
1000)) {
DRM_ERROR("%s : timed out trying to stop ring\n",
engine->name);
/* Sometimes we observe that the idle flag is not
/*
* Sometimes we observe that the idle flag is not
* set even though the ring is empty. So double
* check before giving up.
*/
if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
if (ENGINE_READ(engine, RING_HEAD) !=
ENGINE_READ(engine, RING_TAIL))
return false;
}
}
I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));
ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));
I915_WRITE_HEAD(engine, 0);
I915_WRITE_TAIL(engine, 0);
ENGINE_WRITE(engine, RING_HEAD, 0);
ENGINE_WRITE(engine, RING_TAIL, 0);
/* The ring must be empty before it is disabled */
I915_WRITE_CTL(engine, 0);
ENGINE_WRITE(engine, RING_CTL, 0);
return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
}
static int init_ring_common(struct intel_engine_cs *engine)
@ -640,26 +644,26 @@ static int init_ring_common(struct intel_engine_cs *engine)
struct intel_ring *ring = engine->buffer;
int ret = 0;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
DRM_DEBUG_DRIVER("%s head not reset to zero "
"ctl %08x head %08x tail %08x start %08x\n",
engine->name,
I915_READ_CTL(engine),
I915_READ_HEAD(engine),
I915_READ_TAIL(engine),
I915_READ_START(engine));
ENGINE_READ(engine, RING_CTL),
ENGINE_READ(engine, RING_HEAD),
ENGINE_READ(engine, RING_TAIL),
ENGINE_READ(engine, RING_START));
if (!stop_ring(engine)) {
DRM_ERROR("failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
engine->name,
I915_READ_CTL(engine),
I915_READ_HEAD(engine),
I915_READ_TAIL(engine),
I915_READ_START(engine));
ENGINE_READ(engine, RING_CTL),
ENGINE_READ(engine, RING_HEAD),
ENGINE_READ(engine, RING_TAIL),
ENGINE_READ(engine, RING_START));
ret = -EIO;
goto out;
}
@ -673,18 +677,18 @@ static int init_ring_common(struct intel_engine_cs *engine)
intel_engine_reset_breadcrumbs(engine);
/* Enforce ordering by reading HEAD register back */
I915_READ_HEAD(engine);
ENGINE_READ(engine, RING_HEAD);
/* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
/* WaClearRingBufHeadRegAtInit:ctg,elk */
if (I915_READ_HEAD(engine))
if (ENGINE_READ(engine, RING_HEAD))
DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
engine->name, I915_READ_HEAD(engine));
engine->name, ENGINE_READ(engine, RING_HEAD));
/* Check that the ring offsets point within the ring! */
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
@ -692,42 +696,44 @@ static int init_ring_common(struct intel_engine_cs *engine)
intel_ring_update_space(ring);
/* First wake the ring up to an empty/idle ring */
I915_WRITE_HEAD(engine, ring->head);
I915_WRITE_TAIL(engine, ring->head);
(void)I915_READ_TAIL(engine);
ENGINE_WRITE(engine, RING_HEAD, ring->head);
ENGINE_WRITE(engine, RING_TAIL, ring->head);
ENGINE_POSTING_READ(engine, RING_TAIL);
I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);
/* If the head is still not zero, the ring is dead */
if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
if (intel_wait_for_register(engine->uncore,
RING_CTL(engine->mmio_base),
RING_VALID, RING_VALID,
50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
engine->name,
I915_READ_CTL(engine),
I915_READ_CTL(engine) & RING_VALID,
I915_READ_HEAD(engine), ring->head,
I915_READ_TAIL(engine), ring->tail,
I915_READ_START(engine),
ENGINE_READ(engine, RING_CTL),
ENGINE_READ(engine, RING_CTL) & RING_VALID,
ENGINE_READ(engine, RING_HEAD), ring->head,
ENGINE_READ(engine, RING_TAIL), ring->tail,
ENGINE_READ(engine, RING_START),
i915_ggtt_offset(ring->vma));
ret = -EIO;
goto out;
}
if (INTEL_GEN(dev_priv) > 2)
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
ENGINE_WRITE(engine,
RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
/* Now awake, let it get started */
if (ring->tail != ring->head) {
I915_WRITE_TAIL(engine, ring->tail);
(void)I915_READ_TAIL(engine);
ENGINE_WRITE(engine, RING_TAIL, ring->tail);
ENGINE_POSTING_READ(engine, RING_TAIL);
}
/* Papering over lost _interrupts_ immediately following the restart */
intel_engine_queue_breadcrumbs(engine);
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
return ret;
}
@ -868,7 +874,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
if (INTEL_GEN(dev_priv) >= 6)
I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
return 0;
}
@ -895,12 +901,10 @@ static void cancel_requests(struct intel_engine_cs *engine)
static void i9xx_submit_request(struct i915_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
i915_request_submit(request);
I915_WRITE_TAIL(request->engine,
intel_ring_set_tail(request->ring, request->tail));
ENGINE_WRITE(request->engine, RING_TAIL,
intel_ring_set_tail(request->ring, request->tail));
}
static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
@ -972,20 +976,20 @@ gen5_irq_disable(struct intel_engine_cs *engine)
static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
GEM_BUG_ON(engine->id != RCS0);
dev_priv->irq_mask &= ~engine->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ_FW(RING_IMR(engine->mmio_base));
engine->i915->irq_mask &= ~engine->irq_enable_mask;
ENGINE_WRITE(engine, RING_IMR, engine->i915->irq_mask);
ENGINE_POSTING_READ(engine, RING_IMR);
}
static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
GEM_BUG_ON(engine->id != RCS0);
dev_priv->irq_mask |= engine->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
engine->i915->irq_mask |= engine->irq_enable_mask;
ENGINE_WRITE(engine, RING_IMR, engine->i915->irq_mask);
}
static void
@ -1025,47 +1029,38 @@ bsd_ring_flush(struct i915_request *rq, u32 mode)
static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine,
~(engine->irq_enable_mask |
engine->irq_keep_mask));
ENGINE_WRITE(engine, RING_IMR,
~(engine->irq_enable_mask | engine->irq_keep_mask));
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
POSTING_READ_FW(RING_IMR(engine->mmio_base));
ENGINE_POSTING_READ(engine, RING_IMR);
gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}
static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
}
static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
POSTING_READ_FW(RING_IMR(engine->mmio_base));
ENGINE_POSTING_READ(engine, RING_IMR);
gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
gen6_unmask_pm_irq(engine->i915, engine->irq_enable_mask);
}
static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~0);
gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
ENGINE_WRITE(engine, RING_IMR, ~0);
gen6_mask_pm_irq(engine->i915, engine->irq_enable_mask);
}
static int
@ -1195,15 +1190,6 @@ int intel_ring_pin(struct intel_ring *ring)
else
flags |= PIN_HIGH;
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
else
ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
if (unlikely(ret))
goto unpin_timeline;
}
ret = i915_vma_pin(vma, 0, 0, flags);
if (unlikely(ret))
goto unpin_timeline;
@ -1392,17 +1378,6 @@ static int __context_pin(struct intel_context *ce)
if (!vma)
return 0;
/*
* Clear this page out of any CPU caches for coherent swap-in/out.
* We only want to do this on the first bind so that we do not stall
* on an active context (which by nature is already on the GPU).
*/
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
if (err)
return err;
}
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
return err;
@ -1412,6 +1387,7 @@ static int __context_pin(struct intel_context *ce)
* it cannot reclaim the object until we release it.
*/
vma->obj->pin_global++;
vma->obj->mm.dirty = true;
return 0;
}
@ -1446,6 +1422,24 @@ alloc_context_vma(struct intel_engine_cs *engine)
if (IS_ERR(obj))
return ERR_CAST(obj);
/*
* Try to make the context utilize L3 as well as LLC.
*
* On VLV we don't have L3 controls in the PTEs so we
* shouldn't touch the cache level, especially as that
* would make the object snooped which might have a
* negative performance impact.
*
* Snooping is required on non-llc platforms in execlist
* mode, but since all GGTT accesses use PAT entry 0 we
* get snooping anyway regardless of cache_level.
*
* This is only applicable for Ivy Bridge devices since
* later platforms don't have L3 control bits in the PTE.
*/
if (IS_IVYBRIDGE(i915))
i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
if (engine->default_state) {
void *defaults, *vaddr;
@ -1463,29 +1457,10 @@ alloc_context_vma(struct intel_engine_cs *engine)
}
memcpy(vaddr, defaults, engine->context_size);
i915_gem_object_unpin_map(engine->default_state);
i915_gem_object_unpin_map(obj);
}
/*
* Try to make the context utilize L3 as well as LLC.
*
* On VLV we don't have L3 controls in the PTEs so we
* shouldn't touch the cache level, especially as that
* would make the object snooped which might have a
* negative performance impact.
*
* Snooping is required on non-llc platforms in execlist
* mode, but since all GGTT accesses use PAT entry 0 we
* get snooping anyway regardless of cache_level.
*
* This is only applicable for Ivy Bridge devices since
* later platforms don't have L3 control bits in the PTE.
*/
if (IS_IVYBRIDGE(i915)) {
/* Ignore any error, regard it as a simple optimisation */
i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
}
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
@ -1553,9 +1528,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
if (err)
return err;
timeline = i915_timeline_create(engine->i915,
engine->name,
engine->status_page.vma);
timeline = i915_timeline_create(engine->i915, engine->status_page.vma);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
goto err;
@ -1598,7 +1571,7 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
WARN_ON(INTEL_GEN(dev_priv) > 2 &&
(I915_READ_MODE(engine) & MODE_IDLE) == 0);
(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
intel_ring_unpin(engine->buffer);
intel_ring_put(engine->buffer);
@ -1633,11 +1606,11 @@ static int load_pd_dir(struct i915_request *rq,
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
*cs++ = PP_DIR_DCLV_2G;
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
*cs++ = ppgtt->pd.base.ggtt_offset << 10;
intel_ring_advance(rq, cs);
@ -1656,7 +1629,7 @@ static int flush_pd_dir(struct i915_request *rq)
/* Stall until the page table load is complete */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
*cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
@ -2073,23 +2046,23 @@ int intel_ring_cacheline_align(struct i915_request *rq)
static void gen6_bsd_submit_request(struct i915_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
struct intel_uncore *uncore = request->engine->uncore;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* Every tail move must follow the sequence below */
/* Disable notification that the ring is IDLE. The GT
* will then assume that it is busy and bring it out of rc6.
*/
I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
_MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
_MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
/* Clear the context id. Here be magic! */
I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
/* Wait for the ring not to be idle, i.e. for it to wake up. */
if (__intel_wait_for_register_fw(dev_priv,
if (__intel_wait_for_register_fw(uncore,
GEN6_BSD_SLEEP_PSMI_CONTROL,
GEN6_BSD_SLEEP_INDICATOR,
0,
@ -2102,10 +2075,10 @@ static void gen6_bsd_submit_request(struct i915_request *request)
/* Let the ring send IDLE messages to the GT again,
* and so let it sleep to conserve power when idle.
*/
I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
_MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
_MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
static int mi_flush_dw(struct i915_request *rq, u32 flags)

View file

@ -29,23 +29,44 @@ struct drm_printer;
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)
/*
* The register defines to be used with the following macros need to accept a
* base param, e.g:
*
* REG_FOO(base) _MMIO((base) + <relative offset>)
* ENGINE_READ(engine, REG_FOO);
*
* register arrays are to be defined and accessed as follows:
*
* REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>)
* ENGINE_READ_IDX(engine, REG_BAR, i)
*/
#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)
#define __ENGINE_REG_OP(op__, engine__, ...) \
intel_uncore_##op__((engine__)->uncore, __VA_ARGS__)
#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)
#define __ENGINE_READ_OP(op__, engine__, reg__) \
__ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base))
#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)
#define ENGINE_READ16(...) __ENGINE_READ_OP(read16, __VA_ARGS__)
#define ENGINE_READ(...) __ENGINE_READ_OP(read, __VA_ARGS__)
#define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__)
#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)
#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
__ENGINE_REG_OP(read64_2x32, (engine__), \
lower_reg__((engine__)->mmio_base), \
upper_reg__((engine__)->mmio_base))
#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
#define ENGINE_READ_IDX(engine__, reg__, idx__) \
__ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__)))
#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \
__ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__))
#define ENGINE_WRITE16(...) __ENGINE_WRITE_OP(write16, __VA_ARGS__)
#define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__)
#define ENGINE_WRITE_FW(...) __ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
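As a usage sketch of the convention described above (the RING_TAIL offset shown is an assumption for illustration, not something added by the patch):
/* Illustrative only, assuming RING_TAIL(base) is _MMIO((base) + 0x30): */
ENGINE_WRITE(engine, RING_TAIL, ring->tail);
/* ...which expands to roughly: */
intel_uncore_write(engine->uncore, RING_TAIL(engine->mmio_base), ring->tail);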
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.

View file

@ -565,7 +565,7 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
int pw_idx = power_well->desc->hsw.idx;
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
WARN_ON(intel_wait_for_register(dev_priv,
WARN_ON(intel_wait_for_register(&dev_priv->uncore,
regs->driver,
HSW_PWR_WELL_CTL_STATE(pw_idx),
HSW_PWR_WELL_CTL_STATE(pw_idx),
@ -620,7 +620,7 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
enum skl_power_gate pg)
{
/* Timeout 5us for PG#0, for other PGs 1us */
WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
SKL_FUSE_PG_DIST_STATUS(pg),
SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
@ -1521,7 +1521,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
* The PHY may be busy with some initial calibration and whatnot,
* so the power state can take a while to actually change.
*/
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
DISPLAY_PHY_STATUS,
phy_status_mask,
phy_status,
@ -1556,7 +1556,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_set_power_well(dev_priv, power_well, true);
/* Poll for phypwrgood signal */
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
DISPLAY_PHY_STATUS,
PHY_POWERGOOD(phy),
PHY_POWERGOOD(phy),

View file

@ -51,7 +51,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
5)) {
DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
@ -63,7 +63,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
I915_WRITE(VLV_IOSF_DATA, is_read ? 0 : *val);
I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
5)) {
DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
@ -208,7 +208,7 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
u32 value = 0;
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
SBI_CTL_STAT, SBI_BUSY, 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
@ -224,7 +224,7 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
SBI_CTL_STAT,
SBI_BUSY,
0,
@ -248,7 +248,7 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
SBI_CTL_STAT, SBI_BUSY, 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
@ -264,7 +264,7 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
SBI_CTL_STAT,
SBI_BUSY,
0,

View file

@ -269,7 +269,8 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_rect *src = &plane_state->base.src;
u32 src_x, src_y, src_w, src_h;
u32 src_x, src_y, src_w, src_h, hsub, vsub;
bool rotated = drm_rotation_90_or_270(plane_state->base.rotation);
/*
* Hardware doesn't handle subpixel coordinates.
@ -287,18 +288,26 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
src->y1 = src_y << 16;
src->y2 = (src_y + src_h) << 16;
if (fb->format->is_yuv &&
(src_x & 1 || src_w & 1)) {
DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
src_x, src_w);
if (!fb->format->is_yuv)
return 0;
/* YUV specific checks */
if (!rotated) {
hsub = fb->format->hsub;
vsub = fb->format->vsub;
} else {
hsub = vsub = max(fb->format->hsub, fb->format->vsub);
}
if (src_x % hsub || src_w % hsub) {
DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u for %sYUV planes\n",
src_x, src_w, hsub, rotated ? "rotated " : "");
return -EINVAL;
}
if (fb->format->is_yuv &&
fb->format->num_planes > 1 &&
(src_y & 1 || src_h & 1)) {
DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of 2 for planar YUV planes\n",
src_y, src_h);
if (src_y % vsub || src_h % vsub) {
DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u for %sYUV planes\n",
src_y, src_h, vsub, rotated ? "rotated " : "");
return -EINVAL;
}
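A brief worked example of the relaxed check (hypothetical plane state, not from the patch): a packed 4:2:2 format such as YUYV has hsub = 2 and vsub = 1 in drm_fourcc, so only src_x/src_w need to be even when unrotated, while a 90/270 degree rotation uses max(2, 1) = 2 on both axes and therefore also requires even src_y/src_h.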
@ -1522,6 +1531,11 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ARGB16161616F:
case DRM_FORMAT_ABGR16161616F:
case DRM_FORMAT_Y210:
case DRM_FORMAT_Y212:
case DRM_FORMAT_Y216:
case DRM_FORMAT_XVYU12_16161616:
case DRM_FORMAT_XVYU16161616:
DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
drm_get_format_name(fb->format->format,
&format_name));
@ -2098,12 +2112,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
case DRM_FORMAT_Y210:
case DRM_FORMAT_Y212:
case DRM_FORMAT_Y216:
case DRM_FORMAT_XVYU2101010:
case DRM_FORMAT_XVYU12_16161616:
case DRM_FORMAT_XVYU16161616:
if (modifier == I915_FORMAT_MOD_Yf_TILED)
return true;
/* fall through */
@ -2112,6 +2121,11 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_ABGR16161616F:
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_ARGB16161616F:
case DRM_FORMAT_Y210:
case DRM_FORMAT_Y212:
case DRM_FORMAT_Y216:
case DRM_FORMAT_XVYU12_16161616:
case DRM_FORMAT_XVYU16161616:
if (modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED ||
modifier == I915_FORMAT_MOD_Y_TILED)

File diff suppressed because it is too large

View file

@ -28,10 +28,13 @@
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/hrtimer.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "i915_reg.h"
struct drm_i915_private;
struct i915_runtime_pm;
struct intel_uncore;
enum forcewake_domain_id {
FW_DOMAIN_ID_RENDER = 0,
@ -62,25 +65,25 @@ enum forcewake_domains {
};
struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv,
void (*force_wake_get)(struct intel_uncore *uncore,
enum forcewake_domains domains);
void (*force_wake_put)(struct drm_i915_private *dev_priv,
void (*force_wake_put)(struct intel_uncore *uncore,
enum forcewake_domains domains);
u8 (*mmio_readb)(struct drm_i915_private *dev_priv,
u8 (*mmio_readb)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
u16 (*mmio_readw)(struct drm_i915_private *dev_priv,
u16 (*mmio_readw)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
u32 (*mmio_readl)(struct drm_i915_private *dev_priv,
u32 (*mmio_readl)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
u64 (*mmio_readq)(struct drm_i915_private *dev_priv,
u64 (*mmio_readq)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
void (*mmio_writeb)(struct drm_i915_private *dev_priv,
void (*mmio_writeb)(struct intel_uncore *uncore,
i915_reg_t r, u8 val, bool trace);
void (*mmio_writew)(struct drm_i915_private *dev_priv,
void (*mmio_writew)(struct intel_uncore *uncore,
i915_reg_t r, u16 val, bool trace);
void (*mmio_writel)(struct drm_i915_private *dev_priv,
void (*mmio_writel)(struct intel_uncore *uncore,
i915_reg_t r, u32 val, bool trace);
};
@ -92,8 +95,18 @@ struct intel_forcewake_range {
};
struct intel_uncore {
void __iomem *regs;
struct i915_runtime_pm *rpm;
spinlock_t lock; /** lock is also taken in irq contexts. */
unsigned int flags;
#define UNCORE_HAS_FORCEWAKE BIT(0)
#define UNCORE_HAS_FPGA_DBG_UNCLAIMED BIT(1)
#define UNCORE_HAS_DBG_UNCLAIMED BIT(2)
#define UNCORE_HAS_FIFO BIT(3)
const struct intel_forcewake_range *fw_domains_table;
unsigned int fw_domains_table_entries;
@ -106,10 +119,6 @@ struct intel_uncore {
enum forcewake_domains fw_domains_active;
enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
u32 fw_set;
u32 fw_clear;
u32 fw_reset;
struct intel_uncore_forcewake_domain {
enum forcewake_domain_id id;
enum forcewake_domains mask;
@ -131,12 +140,12 @@ struct intel_uncore {
};
/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, dev_priv__, tmp__) \
#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \
for (tmp__ = (mask__); \
tmp__ ? (domain__ = &(dev_priv__)->uncore.fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
tmp__ ? (domain__ = &(uncore__)->fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
#define for_each_fw_domain(domain__, dev_priv__, tmp__) \
for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__)
#define for_each_fw_domain(domain__, uncore__, tmp__) \
for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)
static inline struct intel_uncore *
forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
@ -144,78 +153,229 @@ forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
return container_of(d, struct intel_uncore, fw_domain[d->id]);
}
static inline bool
intel_uncore_has_forcewake(const struct intel_uncore *uncore)
{
return uncore->flags & UNCORE_HAS_FORCEWAKE;
}
static inline bool
intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore)
{
return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED;
}
static inline bool
intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore)
{
return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED;
}
static inline bool
intel_uncore_has_fifo(const struct intel_uncore *uncore)
{
return uncore->flags & UNCORE_HAS_FIFO;
}
void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
void intel_uncore_init(struct drm_i915_private *dev_priv);
void intel_uncore_prune(struct drm_i915_private *dev_priv);
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
void intel_uncore_fini(struct drm_i915_private *dev_priv);
void intel_uncore_suspend(struct drm_i915_private *dev_priv);
void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
int intel_uncore_init(struct intel_uncore *uncore);
void intel_uncore_prune(struct intel_uncore *uncore);
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
void intel_uncore_fini(struct intel_uncore *uncore);
void intel_uncore_suspend(struct intel_uncore *uncore);
void intel_uncore_resume_early(struct intel_uncore *uncore);
void intel_uncore_runtime_resume(struct intel_uncore *uncore);
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
void assert_forcewakes_active(struct drm_i915_private *dev_priv,
void assert_forcewakes_inactive(struct intel_uncore *uncore);
void assert_forcewakes_active(struct intel_uncore *uncore,
enum forcewake_domains fw_domains);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
i915_reg_t reg, unsigned int op);
#define FW_REG_READ (1)
#define FW_REG_WRITE (2)
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
enum forcewake_domains domains);
/* Like above but the caller must manage the uncore.lock itself.
* Must be used with I915_READ_FW and friends.
*/
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
enum forcewake_domains domains);
void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv);
void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv);
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore);
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore);
int __intel_wait_for_register(struct drm_i915_private *dev_priv,
int __intel_wait_for_register(struct intel_uncore *uncore,
i915_reg_t reg,
u32 mask,
u32 value,
unsigned int fast_timeout_us,
unsigned int slow_timeout_ms,
u32 *out_value);
static inline
int intel_wait_for_register(struct drm_i915_private *dev_priv,
i915_reg_t reg,
u32 mask,
u32 value,
unsigned int timeout_ms)
static inline int
intel_wait_for_register(struct intel_uncore *uncore,
i915_reg_t reg,
u32 mask,
u32 value,
unsigned int timeout_ms)
{
return __intel_wait_for_register(dev_priv, reg, mask, value, 2,
return __intel_wait_for_register(uncore, reg, mask, value, 2,
timeout_ms, NULL);
}
int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t reg,
u32 mask,
u32 value,
unsigned int fast_timeout_us,
unsigned int slow_timeout_ms,
u32 *out_value);
static inline
int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
i915_reg_t reg,
u32 mask,
u32 value,
static inline int
intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t reg,
u32 mask,
u32 value,
unsigned int timeout_ms)
{
return __intel_wait_for_register_fw(dev_priv, reg, mask, value,
return __intel_wait_for_register_fw(uncore, reg, mask, value,
2, timeout_ms, NULL);
}
/* register access functions */
#define __raw_read(x__, s__) \
static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
i915_reg_t reg) \
{ \
return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
}
#define __raw_write(x__, s__) \
static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
i915_reg_t reg, u##x__ val) \
{ \
write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)
__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)
#undef __raw_read
#undef __raw_write
#define __uncore_read(name__, x__, s__, trace__) \
static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \
i915_reg_t reg) \
{ \
return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \
}
#define __uncore_write(name__, x__, s__, trace__) \
static inline void intel_uncore_##name__(struct intel_uncore *uncore, \
i915_reg_t reg, u##x__ val) \
{ \
uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \
}
__uncore_read(read8, 8, b, true)
__uncore_read(read16, 16, w, true)
__uncore_read(read, 32, l, true)
__uncore_read(read16_notrace, 16, w, false)
__uncore_read(read_notrace, 32, l, false)
__uncore_write(write8, 8, b, true)
__uncore_write(write16, 16, w, true)
__uncore_write(write, 32, l, true)
__uncore_write(write_notrace, 32, l, false)
/* Be very careful with read/write 64-bit values. On 32-bit machines, they
* will be implemented using 2 32-bit writes in an arbitrary order with
* an arbitrary delay between them. This can cause the hardware to
* act upon the intermediate value, possibly leading to corruption and
* machine death. For this reason we do not support I915_WRITE64, or
* uncore->funcs.mmio_writeq.
*
* When reading a 64-bit value as two 32-bit values, the delay may cause
* the two reads to mismatch, e.g. a timestamp overflowing. Also note that
* occasionally a 64-bit register does not actually support a full readq
* and must be read using two 32-bit reads.
*
* You have been warned.
*/
__uncore_read(read64, 64, q, true)
static inline u64
intel_uncore_read64_2x32(struct intel_uncore *uncore,
i915_reg_t lower_reg, i915_reg_t upper_reg)
{
u32 upper, lower, old_upper, loop = 0;
upper = intel_uncore_read(uncore, upper_reg);
do {
old_upper = upper;
lower = intel_uncore_read(uncore, lower_reg);
upper = intel_uncore_read(uncore, upper_reg);
} while (upper != old_upper && loop++ < 2);
return (u64)upper << 32 | lower;
}
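As a worked example of the split read above, a caller sampling a 64-bit counter can lean on intel_uncore_read64_2x32() so the upper half is re-read until it is stable. This is only a sketch; RING_TIMESTAMP()/RING_TIMESTAMP_UDW() and RENDER_RING_BASE are assumed to be the usual i915_reg.h definitions for the two halves of the render engine timestamp:

/* Sketch: read the render engine timestamp as two stable 32-bit halves. */
static u64 sample_render_timestamp(struct intel_uncore *uncore)
{
	return intel_uncore_read64_2x32(uncore,
					RING_TIMESTAMP(RENDER_RING_BASE),
					RING_TIMESTAMP_UDW(RENDER_RING_BASE));
}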
#define intel_uncore_posting_read(...) ((void)intel_uncore_read_notrace(__VA_ARGS__))
#define intel_uncore_posting_read16(...) ((void)intel_uncore_read16_notrace(__VA_ARGS__))
#undef __uncore_read
#undef __uncore_write
/* These are untraced mmio-accessors that are only valid to be used inside
* critical sections, such as inside IRQ handlers, where forcewake is explicitly
* controlled.
*
* Think twice, and think again, before using these.
*
* As an example, these accessors can possibly be used between:
*
* spin_lock_irq(&uncore->lock);
* intel_uncore_forcewake_get__locked();
*
* and
*
* intel_uncore_forcewake_put__locked();
* spin_unlock_irq(&uncore->lock);
*
*
* Note: some registers may not need forcewake held, so
* intel_uncore_forcewake_{get,put} can be omitted, see
* intel_uncore_forcewake_for_reg().
*
* Certain architectures will die if the same cacheline is concurrently accessed
* by different clients (e.g. on Ivybridge). Access to registers should
* therefore generally be serialised, by either the dev_priv->uncore.lock or
* a more localised lock guarding all access to that bank of registers.
*/
#define intel_uncore_read_fw(...) __raw_uncore_read32(__VA_ARGS__)
#define intel_uncore_write_fw(...) __raw_uncore_write32(__VA_ARGS__)
#define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__)
#define intel_uncore_posting_read_fw(...) ((void)intel_uncore_read_fw(__VA_ARGS__))
static inline void intel_uncore_rmw_or_fw(struct intel_uncore *uncore,
i915_reg_t reg, u32 or_val)
{
intel_uncore_write_fw(uncore, reg,
intel_uncore_read_fw(uncore, reg) | or_val);
}
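The comment above boils down to the following pattern; a minimal sketch of a caller that holds uncore->lock and forcewake explicitly around an untraced read, using only the helpers declared in this header:

static u32 read_reg_fw_locked(struct intel_uncore *uncore, i915_reg_t reg)
{
	enum forcewake_domains fw;
	unsigned long flags;
	u32 val;

	/* Which forcewake domains, if any, cover this register? */
	fw = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw);

	/* Untraced access; only valid while forcewake is explicitly held. */
	val = intel_uncore_read_fw(uncore, reg);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irqrestore(&uncore->lock, flags);

	return val;
}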
#define raw_reg_read(base, reg) \
readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \

View file

@ -905,7 +905,7 @@ wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
unsigned int i;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
fw |= intel_uncore_forcewake_for_reg(dev_priv,
fw |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
wa->reg,
FW_REG_READ |
FW_REG_WRITE);
@ -927,7 +927,7 @@ wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
fw = wal_get_fw_for_rmw(dev_priv, wal);
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
intel_uncore_forcewake_get__locked(dev_priv, fw);
intel_uncore_forcewake_get__locked(&dev_priv->uncore, fw);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
u32 val = I915_READ_FW(wa->reg);
@ -938,7 +938,7 @@ wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
I915_WRITE_FW(wa->reg, val);
}
intel_uncore_forcewake_put__locked(dev_priv, fw);
intel_uncore_forcewake_put__locked(&dev_priv->uncore, fw);
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
}

View file

@ -908,10 +908,6 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
if (IS_ERR(obj))
return ERR_CAST(obj);
err = i915_gem_object_set_to_wc_domain(obj, true);
if (err)
goto err;
cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
@ -1584,6 +1580,7 @@ static int igt_tmpfs_fallback(void *arg)
}
*vaddr = 0xdeadbeaf;
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
vma = i915_vma_instance(obj, vm, NULL);
@ -1713,7 +1710,7 @@ int i915_gem_huge_page_mock_selftests(void)
mkwrite_device_info(dev_priv)->ppgtt_size = 48;
mutex_lock(&dev_priv->drm.struct_mutex);
ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
@ -1735,7 +1732,6 @@ int i915_gem_huge_page_mock_selftests(void)
err = i915_subtests(tests, ppgtt);
out_close:
i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:

View file

@ -76,7 +76,7 @@ static int live_nop_switch(void *arg)
}
for (n = 0; n < nctx; n++) {
ctx[n] = i915_gem_create_context(i915, file->driver_priv);
ctx[n] = live_context(i915, file);
if (IS_ERR(ctx[n])) {
err = PTR_ERR(ctx[n]);
goto out_unlock;
@ -220,6 +220,7 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
offset += PAGE_SIZE;
}
*cmd = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
@ -372,7 +373,8 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
return 0;
}
static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
static noinline int cpu_check(struct drm_i915_gem_object *obj,
unsigned int idx, unsigned int max)
{
unsigned int n, m, needs_flush;
int err;
@ -390,8 +392,10 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
for (m = 0; m < max; m++) {
if (map[m] != m) {
pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
n, m, map[m], m);
pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
__builtin_return_address(0), idx,
n, real_page_count(obj), m, max,
map[m], m);
err = -EINVAL;
goto out_unmap;
}
@ -399,8 +403,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
for (; m < DW_PER_PAGE; m++) {
if (map[m] != STACK_MAGIC) {
pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
n, m, map[m], STACK_MAGIC);
pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
__builtin_return_address(0), idx, n, m,
map[m], STACK_MAGIC);
err = -EINVAL;
goto out_unmap;
}
@ -478,12 +483,8 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj = NULL;
unsigned long ncontexts, ndwords, dw;
struct igt_live_test t;
struct drm_file *file;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = -ENODEV;
/*
@ -495,38 +496,42 @@ static int igt_ctx_exec(void *arg)
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return 0;
file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
for_each_engine(engine, i915, id) {
struct drm_i915_gem_object *obj = NULL;
unsigned long ncontexts, ndwords, dw;
struct igt_live_test t;
struct drm_file *file;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
mutex_lock(&i915->drm.struct_mutex);
if (!intel_engine_can_store_dword(engine))
continue;
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
if (!engine->context_size)
continue; /* No logical context support in HW */
ncontexts = 0;
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
unsigned int id;
file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
ctx = i915_gem_create_context(i915, file->driver_priv);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
mutex_lock(&i915->drm.struct_mutex);
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
goto out_unlock;
}
for_each_engine(engine, i915, id) {
ncontexts = 0;
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
if (!engine->context_size)
continue; /* No logical context support in HW */
if (!intel_engine_can_store_dword(engine))
continue;
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_unlock;
}
if (!obj) {
obj = create_test_object(ctx, file, &objects);
@ -536,7 +541,6 @@ static int igt_ctx_exec(void *arg)
}
}
err = 0;
with_intel_runtime_pm(i915, wakeref)
err = gpu_fill(obj, ctx, engine, dw);
if (err) {
@ -551,28 +555,152 @@ static int igt_ctx_exec(void *arg)
obj = NULL;
dw = 0;
}
ndwords++;
ncontexts++;
}
ncontexts++;
}
pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
ncontexts, RUNTIME_INFO(i915)->num_engines, ndwords);
dw = 0;
list_for_each_entry(obj, &objects, st_link) {
unsigned int rem =
min_t(unsigned int, ndwords - dw, max_dwords(obj));
pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
ncontexts, engine->name, ndwords);
err = cpu_check(obj, rem);
if (err)
break;
ncontexts = dw = 0;
list_for_each_entry(obj, &objects, st_link) {
unsigned int rem =
min_t(unsigned int, ndwords - dw, max_dwords(obj));
dw += rem;
}
err = cpu_check(obj, ncontexts++, rem);
if (err)
break;
dw += rem;
}
out_unlock:
if (igt_live_test_end(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
if (err)
return err;
}
return 0;
}
static int igt_shared_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *parent;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_live_test t;
struct drm_file *file;
int err = 0;
/*
* Create a few different contexts with the same mm and write
* through each ctx using the GPU making sure those writes end
* up in the expected pages of our obj.
*/
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return 0;
file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
parent = live_context(i915, file);
if (IS_ERR(parent)) {
err = PTR_ERR(parent);
goto out_unlock;
}
if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */
err = 0;
goto out_unlock;
}
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
for_each_engine(engine, i915, id) {
unsigned long ncontexts, ndwords, dw;
struct drm_i915_gem_object *obj = NULL;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
if (!intel_engine_can_store_dword(engine))
continue;
dw = 0;
ndwords = 0;
ncontexts = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_test;
}
__assign_ppgtt(ctx, parent->ppgtt);
if (!obj) {
obj = create_test_object(parent, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
kernel_context_close(ctx);
goto out_test;
}
}
err = 0;
with_intel_runtime_pm(i915, wakeref)
err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name, ctx->hw_id,
yesno(!!ctx->ppgtt), err);
kernel_context_close(ctx);
goto out_test;
}
if (++dw == max_dwords(obj)) {
obj = NULL;
dw = 0;
}
ndwords++;
ncontexts++;
kernel_context_close(ctx);
}
pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
ncontexts, engine->name, ndwords);
ncontexts = dw = 0;
list_for_each_entry(obj, &objects, st_link) {
unsigned int rem =
min_t(unsigned int, ndwords - dw, max_dwords(obj));
err = cpu_check(obj, ncontexts++, rem);
if (err)
goto out_test;
dw += rem;
}
}
out_test:
if (igt_live_test_end(&t))
err = -EIO;
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
@ -604,12 +732,9 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
*cmd++ = upper_32_bits(vma->node.start);
*cmd = MI_BATCH_BUFFER_END;
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
if (err)
goto err;
vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@ -962,7 +1087,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
mutex_lock(&i915->drm.struct_mutex);
ctx = i915_gem_create_context(i915, file->driver_priv);
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
goto out_unlock;
@ -1048,7 +1173,7 @@ static int igt_ctx_readonly(void *arg)
struct drm_i915_gem_object *obj = NULL;
struct i915_gem_context *ctx;
struct i915_hw_ppgtt *ppgtt;
unsigned long ndwords, dw;
unsigned long idx, ndwords, dw;
struct igt_live_test t;
struct drm_file *file;
I915_RND_STATE(prng);
@ -1072,7 +1197,7 @@ static int igt_ctx_readonly(void *arg)
if (err)
goto out_unlock;
ctx = i915_gem_create_context(i915, file->driver_priv);
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_unlock;
@ -1129,6 +1254,7 @@ static int igt_ctx_readonly(void *arg)
ndwords, RUNTIME_INFO(i915)->num_engines);
dw = 0;
idx = 0;
list_for_each_entry(obj, &objects, st_link) {
unsigned int rem =
min_t(unsigned int, ndwords - dw, max_dwords(obj));
@ -1138,7 +1264,7 @@ static int igt_ctx_readonly(void *arg)
if (i915_gem_object_is_readonly(obj))
num_writes = 0;
err = cpu_check(obj, num_writes);
err = cpu_check(obj, idx++, num_writes);
if (err)
break;
@ -1202,12 +1328,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
}
*cmd++ = value;
*cmd = MI_BATCH_BUFFER_END;
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
if (err)
goto err;
vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@ -1299,11 +1422,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
*cmd++ = result;
}
*cmd = MI_BATCH_BUFFER_END;
i915_gem_object_unpin_map(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
if (err)
goto err;
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
if (IS_ERR(vma)) {
@ -1397,13 +1518,13 @@ static int igt_vm_isolation(void *arg)
if (err)
goto out_unlock;
ctx_a = i915_gem_create_context(i915, file->driver_priv);
ctx_a = live_context(i915, file);
if (IS_ERR(ctx_a)) {
err = PTR_ERR(ctx_a);
goto out_unlock;
}
ctx_b = i915_gem_create_context(i915, file->driver_priv);
ctx_b = live_context(i915, file);
if (IS_ERR(ctx_b)) {
err = PTR_ERR(ctx_b);
goto out_unlock;
@ -1620,13 +1741,14 @@ static int mock_context_barrier(void *arg)
mutex_lock(&i915->drm.struct_mutex);
ctx = mock_context(i915, "mock");
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
if (!ctx) {
err = -ENOMEM;
goto unlock;
}
counter = 0;
err = context_barrier_task(ctx, 0, mock_barrier_task, &counter);
err = context_barrier_task(ctx, 0,
NULL, mock_barrier_task, &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;
@ -1638,8 +1760,8 @@ static int mock_context_barrier(void *arg)
}
counter = 0;
err = context_barrier_task(ctx,
ALL_ENGINES, mock_barrier_task, &counter);
err = context_barrier_task(ctx, ALL_ENGINES,
NULL, mock_barrier_task, &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;
@ -1662,8 +1784,8 @@ static int mock_context_barrier(void *arg)
counter = 0;
context_barrier_inject_fault = BIT(RCS0);
err = context_barrier_task(ctx,
ALL_ENGINES, mock_barrier_task, &counter);
err = context_barrier_task(ctx, ALL_ENGINES,
NULL, mock_barrier_task, &counter);
context_barrier_inject_fault = 0;
if (err == -ENXIO)
err = 0;
@ -1677,8 +1799,8 @@ static int mock_context_barrier(void *arg)
goto out;
counter = 0;
err = context_barrier_task(ctx,
ALL_ENGINES, mock_barrier_task, &counter);
err = context_barrier_task(ctx, ALL_ENGINES,
NULL, mock_barrier_task, &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;
@ -1726,6 +1848,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
SUBTEST(igt_ctx_sseu),
SUBTEST(igt_shared_ctx_exec),
SUBTEST(igt_vm_isolation),
};

View file

@ -315,6 +315,7 @@ static int igt_dmabuf_export_kmap(void *arg)
goto err;
}
memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
ptr = dma_buf_kmap(dmabuf, 1);

View file

@ -274,7 +274,7 @@ static int igt_evict_for_cache_color(void *arg)
err = PTR_ERR(obj);
goto cleanup;
}
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
quirk_add(obj, &objects);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@ -290,7 +290,7 @@ static int igt_evict_for_cache_color(void *arg)
err = PTR_ERR(obj);
goto cleanup;
}
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
quirk_add(obj, &objects);
/* Neighbouring; same colour - should fit */
@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg)
struct i915_gem_context *ctx;
ctx = live_context(i915, file);
if (!ctx)
if (IS_ERR(ctx))
break;
/* We will need some GGTT space for the rq's context */

View file

@ -1010,7 +1010,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
return PTR_ERR(file);
mutex_lock(&dev_priv->drm.struct_mutex);
ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
@ -1020,7 +1020,6 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);

View file

@ -619,13 +619,11 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
}
*cmd = MI_BATCH_BUFFER_END;
i915_gem_chipset_flush(i915);
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
if (err)
goto err;
i915_gem_chipset_flush(i915);
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
@ -777,10 +775,6 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (err)
goto err;
err = i915_gem_object_set_to_wc_domain(obj, true);
if (err)
goto err;
cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
@ -799,10 +793,12 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
*cmd++ = lower_32_bits(vma->node.start);
}
*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
i915_gem_chipset_flush(i915);
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
i915_gem_chipset_flush(i915);
return vma;
err:

View file

@ -64,7 +64,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
unsigned long cacheline;
int err;
tl = i915_timeline_create(state->i915, "mock", NULL);
tl = i915_timeline_create(state->i915, NULL);
if (IS_ERR(tl))
return PTR_ERR(tl);
@ -476,7 +476,7 @@ checked_i915_timeline_create(struct drm_i915_private *i915)
{
struct i915_timeline *tl;
tl = i915_timeline_create(i915, "live", NULL);
tl = i915_timeline_create(i915, NULL);
if (IS_ERR(tl))
return tl;
@ -658,7 +658,7 @@ static int live_hwsp_wrap(void *arg)
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(i915);
tl = i915_timeline_create(i915, __func__, NULL);
tl = i915_timeline_create(i915, NULL);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
goto out_rpm;

View file

@ -29,7 +29,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
goto err_hws;
}
i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);

View file

@ -70,7 +70,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
goto err_hws;
}
i915_gem_object_set_cache_level(h->hws, I915_CACHE_LLC);
i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);

View file

@ -10,6 +10,7 @@
#include "../i915_selftest.h"
#include "igt_flush_test.h"
#include "igt_live_test.h"
#include "igt_spinner.h"
#include "i915_random.h"
@ -113,11 +114,17 @@ static int live_preempt(void *arg)
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
for_each_engine(engine, i915, id) {
struct igt_live_test t;
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
continue;
if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
err = -EIO;
goto err_ctx_lo;
}
rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
@ -153,7 +160,8 @@ static int live_preempt(void *arg)
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
if (igt_live_test_end(&t)) {
err = -EIO;
goto err_ctx_lo;
}
@ -207,11 +215,17 @@ static int live_late_preempt(void *arg)
goto err_ctx_hi;
for_each_engine(engine, i915, id) {
struct igt_live_test t;
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
continue;
if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
err = -EIO;
goto err_ctx_lo;
}
rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
@ -250,7 +264,8 @@ static int live_late_preempt(void *arg)
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
if (igt_live_test_end(&t)) {
err = -EIO;
goto err_ctx_lo;
}
@ -615,14 +630,39 @@ static int live_chain_preempt(void *arg)
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
};
int count, i;
struct igt_live_test t;
struct i915_request *rq;
int ring_size, count, i;
if (!intel_engine_has_preemption(engine))
continue;
for_each_prime_number_from(count, 1, 32) { /* must fit ring! */
struct i915_request *rq;
rq = igt_spinner_create_request(&lo.spin,
lo.ctx, engine,
MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
ring_size = rq->wa_tail - rq->head;
if (ring_size < 0)
ring_size += rq->ring->size;
ring_size = rq->ring->size / ring_size;
pr_debug("%s(%s): Using maximum of %d requests\n",
__func__, engine->name, ring_size);
igt_spinner_end(&lo.spin);
if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
pr_err("Timed out waiting to flush %s\n", engine->name);
goto err_wedged;
}
if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
err = -EIO;
goto err_wedged;
}
for_each_prime_number_from(count, 1, ring_size) {
rq = igt_spinner_create_request(&hi.spin,
hi.ctx, engine,
MI_ARB_CHECK);
@ -664,6 +704,26 @@ static int live_chain_preempt(void *arg)
goto err_wedged;
}
igt_spinner_end(&lo.spin);
rq = i915_request_alloc(engine, lo.ctx);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
struct drm_printer p =
drm_info_printer(i915->drm.dev);
pr_err("Failed to flush low priority chain of %d requests\n",
count);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
goto err_wedged;
}
}
if (igt_live_test_end(&t)) {
err = -EIO;
goto err_wedged;
}
}
@ -988,6 +1048,7 @@ static int live_preempt_smoke(void *arg)
};
const unsigned int phase[] = { 0, BATCH };
intel_wakeref_t wakeref;
struct igt_live_test t;
int err = -ENOMEM;
u32 *cs;
int n;
@ -1018,11 +1079,13 @@ static int live_preempt_smoke(void *arg)
for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
cs[n] = MI_ARB_CHECK;
cs[n] = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(smoke.batch);
i915_gem_object_unpin_map(smoke.batch);
err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
if (err)
if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
err = -EIO;
goto err_batch;
}
for (n = 0; n < smoke.ncontext; n++) {
smoke.contexts[n] = kernel_context(smoke.i915);
@ -1041,7 +1104,7 @@ static int live_preempt_smoke(void *arg)
}
err_ctx:
if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
if (igt_live_test_end(&t))
err = -EIO;
for (n = 0; n < smoke.ncontext; n++) {

View file

@ -119,9 +119,132 @@ int intel_uncore_mock_selftests(void)
return 0;
}
static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_priv)
static int live_forcewake_ops(void *arg)
{
static const struct reg {
const char *name;
unsigned long platforms;
unsigned int offset;
} registers[] = {
{
"RING_START",
INTEL_GEN_MASK(6, 7),
0x38,
},
{
"RING_MI_MODE",
INTEL_GEN_MASK(8, BITS_PER_LONG),
0x9c,
}
};
const struct reg *r;
struct drm_i915_private *i915 = arg;
struct intel_uncore_forcewake_domain *domain;
struct intel_uncore *uncore = &i915->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
unsigned int tmp;
int err = 0;
GEM_BUG_ON(i915->gt.awake);
/* vlv/chv with their pcu behave differently wrt reads */
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
pr_debug("PCU fakes forcewake badly; skipping\n");
return 0;
}
/* We have to pick carefully to get the exact behaviour we need */
for (r = registers; r->name; r++)
if (r->platforms & INTEL_INFO(i915)->gen_mask)
break;
if (!r->name) {
pr_debug("Forcewaked register not known for %s; skipping\n",
intel_platform_name(INTEL_INFO(i915)->platform));
return 0;
}
wakeref = intel_runtime_pm_get(i915);
for_each_fw_domain(domain, uncore, tmp) {
smp_store_mb(domain->active, false);
if (!hrtimer_cancel(&domain->timer))
continue;
intel_uncore_fw_release_timer(&domain->timer);
}
for_each_engine(engine, i915, id) {
i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
enum forcewake_domains fw_domains;
u32 val;
if (!engine->default_state)
continue;
fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio,
FW_REG_READ);
if (!fw_domains)
continue;
for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
if (!domain->wake_count)
continue;
pr_err("fw_domain %s still active, aborting test!\n",
intel_uncore_forcewake_domain_to_str(domain->id));
err = -EINVAL;
goto out_rpm;
}
intel_uncore_forcewake_get(uncore, fw_domains);
val = readl(reg);
intel_uncore_forcewake_put(uncore, fw_domains);
/* Flush the forcewake release (delayed onto a timer) */
for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
smp_store_mb(domain->active, false);
if (hrtimer_cancel(&domain->timer))
intel_uncore_fw_release_timer(&domain->timer);
preempt_disable();
err = wait_ack_clear(domain, FORCEWAKE_KERNEL);
preempt_enable();
if (err) {
pr_err("Failed to clear fw_domain %s\n",
intel_uncore_forcewake_domain_to_str(domain->id));
goto out_rpm;
}
}
if (!val) {
pr_err("%s:%s was zero while fw was held!\n",
engine->name, r->name);
err = -EINVAL;
goto out_rpm;
}
/* We then expect the read to return 0 outside of the fw */
if (wait_for(readl(reg) == 0, 100)) {
pr_err("%s:%s=%0x, fw_domains 0x%x still up after 100ms!\n",
engine->name, r->name, readl(reg), fw_domains);
err = -ETIMEDOUT;
goto out_rpm;
}
}
out_rpm:
intel_runtime_pm_put(i915, wakeref);
return err;
}
static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
struct drm_i915_private *dev_priv = arg;
struct intel_uncore *uncore = &dev_priv->uncore;
unsigned long *valid;
u32 offset;
int err;
@ -137,48 +260,52 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
return 0;
valid = kcalloc(BITS_TO_LONGS(FW_RANGE), sizeof(*valid),
GFP_KERNEL);
valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL);
if (!valid)
return -ENOMEM;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
check_for_unclaimed_mmio(dev_priv);
check_for_unclaimed_mmio(uncore);
for (offset = 0; offset < FW_RANGE; offset += 4) {
i915_reg_t reg = { offset };
(void)I915_READ_FW(reg);
if (!check_for_unclaimed_mmio(dev_priv))
if (!check_for_unclaimed_mmio(uncore))
set_bit(offset, valid);
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
err = 0;
for_each_set_bit(offset, valid, FW_RANGE) {
i915_reg_t reg = { offset };
iosf_mbi_punit_acquire();
intel_uncore_forcewake_reset(dev_priv);
intel_uncore_forcewake_reset(uncore);
iosf_mbi_punit_release();
check_for_unclaimed_mmio(dev_priv);
check_for_unclaimed_mmio(uncore);
(void)I915_READ(reg);
if (check_for_unclaimed_mmio(dev_priv)) {
if (check_for_unclaimed_mmio(uncore)) {
pr_err("Unclaimed mmio read to register 0x%04x\n",
offset);
err = -EINVAL;
}
}
kfree(valid);
bitmap_free(valid);
return err;
}
int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_forcewake_ops),
SUBTEST(live_forcewake_domains),
};
int err;
/* Confirm the table we load is still valid */
@ -188,9 +315,5 @@ int intel_uncore_live_selftests(struct drm_i915_private *i915)
if (err)
return err;
err = intel_uncore_check_forcewake_domains(i915);
if (err)
return err;
return 0;
return i915_subtests(tests, i915);
}

View file

@ -90,6 +90,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
goto err_obj;
}
memset(cs, 0xc5, PAGE_SIZE);
i915_gem_object_flush_map(result);
i915_gem_object_unpin_map(result);
vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
@ -358,6 +359,7 @@ static struct i915_vma *create_scratch(struct i915_gem_context *ctx)
goto err_obj;
}
memset(ptr, 0xc5, PAGE_SIZE);
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
@ -551,6 +553,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
*cs++ = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(batch->obj);
i915_gem_object_unpin_map(batch->obj);
i915_gem_chipset_flush(ctx->i915);

View file

@ -54,13 +54,17 @@ mock_context(struct drm_i915_private *i915,
goto err_handles;
if (name) {
struct i915_hw_ppgtt *ppgtt;
ctx->name = kstrdup(name, GFP_KERNEL);
if (!ctx->name)
goto err_put;
ctx->ppgtt = mock_ppgtt(i915, name);
if (!ctx->ppgtt)
ppgtt = mock_ppgtt(i915, name);
if (!ppgtt)
goto err_put;
__set_ppgtt(ctx, ppgtt);
}
return ctx;
@ -88,9 +92,24 @@ void mock_init_contexts(struct drm_i915_private *i915)
struct i915_gem_context *
live_context(struct drm_i915_private *i915, struct drm_file *file)
{
struct i915_gem_context *ctx;
int err;
lockdep_assert_held(&i915->drm.struct_mutex);
return i915_gem_create_context(i915, file->driver_priv);
ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx))
return ctx;
err = gem_context_register(ctx, file->driver_priv);
if (err < 0)
goto err_ctx;
return ctx;
err_ctx:
context_close(ctx);
return ERR_PTR(err);
}
struct i915_gem_context *

View file

@ -50,9 +50,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
if (!ring)
return NULL;
if (i915_timeline_init(engine->i915,
&ring->timeline, engine->name,
NULL)) {
if (i915_timeline_init(engine->i915, &ring->timeline, NULL)) {
kfree(ring);
return NULL;
}
@ -198,6 +196,37 @@ static void mock_submit_request(struct i915_request *request)
spin_unlock_irqrestore(&engine->hw_lock, flags);
}
static void mock_reset_prepare(struct intel_engine_cs *engine)
{
}
static void mock_reset(struct intel_engine_cs *engine, bool stalled)
{
GEM_BUG_ON(stalled);
}
static void mock_reset_finish(struct intel_engine_cs *engine)
{
}
static void mock_cancel_requests(struct intel_engine_cs *engine)
{
struct i915_request *request;
unsigned long flags;
spin_lock_irqsave(&engine->timeline.lock, flags);
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->timeline.requests, sched.link) {
if (!i915_request_signaled(request))
dma_fence_set_error(&request->fence, -EIO);
i915_request_mark_complete(request);
}
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
const char *name,
int id)
@ -223,10 +252,12 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
engine->base.submit_request = mock_submit_request;
if (i915_timeline_init(i915,
&engine->base.timeline,
engine->base.name,
NULL))
engine->base.reset.prepare = mock_reset_prepare;
engine->base.reset.reset = mock_reset;
engine->base.reset.finish = mock_reset_finish;
engine->base.cancel_requests = mock_cancel_requests;
if (i915_timeline_init(i915, &engine->base.timeline, NULL))
goto err_free;
i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);

View file

@ -182,7 +182,7 @@ struct drm_i915_private *mock_gem_device(void)
I915_GTT_PAGE_SIZE_64K |
I915_GTT_PAGE_SIZE_2M;
mock_uncore_init(i915);
mock_uncore_init(&i915->uncore);
i915_gem_init__mm(i915);
init_waitqueue_head(&i915->gpu_error.wait_queue);

View file

@ -26,21 +26,21 @@
#define __nop_write(x) \
static void \
nop_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { }
nop_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { }
__nop_write(8)
__nop_write(16)
__nop_write(32)
#define __nop_read(x) \
static u##x \
nop_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { return 0; }
nop_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { return 0; }
__nop_read(8)
__nop_read(16)
__nop_read(32)
__nop_read(64)
void mock_uncore_init(struct drm_i915_private *i915)
void mock_uncore_init(struct intel_uncore *uncore)
{
ASSIGN_WRITE_MMIO_VFUNCS(i915, nop);
ASSIGN_READ_MMIO_VFUNCS(i915, nop);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop);
ASSIGN_READ_MMIO_VFUNCS(uncore, nop);
}

View file

@ -25,6 +25,6 @@
#ifndef __MOCK_UNCORE_H
#define __MOCK_UNCORE_H
void mock_uncore_init(struct drm_i915_private *i915);
void mock_uncore_init(struct intel_uncore *uncore);
#endif /* !__MOCK_UNCORE_H */

View file

@ -78,7 +78,7 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_GEN_FIFO_STAT(port), mask, mask,
100))
DRM_ERROR("DPI FIFOs are not empty\n");
@ -148,7 +148,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* note: this is never true for reads */
if (packet.payload_length) {
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_GEN_FIFO_STAT(port),
data_mask, 0,
50))
@ -162,7 +162,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
}
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_GEN_FIFO_STAT(port),
ctrl_mask, 0,
50)) {
@ -174,7 +174,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_INTR_STAT(port),
data_mask, data_mask,
50))
@ -234,7 +234,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_INTR_STAT(port), mask, mask,
100))
DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
@ -353,16 +353,18 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
/* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(dev_priv,
MIPI_CTRL(port), GLK_MIPIIO_PORT_POWERED,
GLK_MIPIIO_PORT_POWERED, 20))
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_MIPIIO_PORT_POWERED,
GLK_MIPIIO_PORT_POWERED,
20))
DRM_ERROR("MIPIO port is powergated\n");
}
/* Check for cold boot scenario */
for_each_dsi_port(port, intel_dsi->ports) {
cold_boot |= !(I915_READ(MIPI_DEVICE_READY(port)) &
DEVICE_READY);
cold_boot |=
!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY);
}
return cold_boot;
@ -377,9 +379,11 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(dev_priv,
MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY,
GLK_PHY_STATUS_PORT_READY, 20))
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY,
GLK_PHY_STATUS_PORT_READY,
20))
DRM_ERROR("PHY is not ON\n");
}
@ -403,8 +407,11 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
I915_WRITE(MIPI_DEVICE_READY(port), val);
/* Wait for ULPS active */
if (intel_wait_for_register(dev_priv,
MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 0, 20))
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_ULPS_NOT_ACTIVE,
0,
20))
DRM_ERROR("ULPS not active\n");
/* Exit ULPS */
@ -427,17 +434,21 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for Stop state */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(dev_priv,
MIPI_CTRL(port), GLK_DATA_LANE_STOP_STATE,
GLK_DATA_LANE_STOP_STATE, 20))
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_DATA_LANE_STOP_STATE,
GLK_DATA_LANE_STOP_STATE,
20))
DRM_ERROR("Date lane not in STOP state\n");
}
/* Wait for AFE LATCH */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(dev_priv,
BXT_MIPI_PORT_CTRL(port), AFE_LATCHOUT,
AFE_LATCHOUT, 20))
if (intel_wait_for_register(&dev_priv->uncore,
BXT_MIPI_PORT_CTRL(port),
AFE_LATCHOUT,
AFE_LATCHOUT,
20))
DRM_ERROR("D-PHY not entering LP-11 state\n");
}
}
@ -537,7 +548,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 0, 20))
DRM_ERROR("PHY is not turning OFF\n");
@ -545,7 +556,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Wait for Pwr ACK bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_MIPIIO_PORT_POWERED, 0, 20))
DRM_ERROR("MIPI IO Port is not powergated\n");
@ -566,7 +577,7 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 0, 20))
DRM_ERROR("PHY is not turning OFF\n");
@ -616,7 +627,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* Port A only. MIPI Port C has no similar bit for checking.
*/
if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
intel_wait_for_register(dev_priv,
intel_wait_for_register(&dev_priv->uncore,
port_ctrl, AFE_LATCHOUT, 0,
30))
DRM_ERROR("DSI LP not going Low\n");
@ -1658,7 +1669,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_display_mode *scan, *fixed_mode = NULL;
struct drm_display_mode *fixed_mode;
enum port port;
DRM_DEBUG_KMS("\n");
@ -1769,13 +1780,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(intel_connector, intel_encoder);
mutex_lock(&dev->mode_config.mutex);
intel_dsi_vbt_get_modes(intel_dsi);
list_for_each_entry(scan, &connector->probed_modes, head) {
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
fixed_mode = drm_mode_duplicate(dev, scan);
break;
}
}
fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
if (!fixed_mode) {
@ -1783,9 +1788,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
goto err;
}
connector->display_info.width_mm = fixed_mode->width_mm;
connector->display_info.height_mm = fixed_mode->height_mm;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_panel_setup_backlight(connector, INVALID_PIPE);

View file

@ -244,7 +244,7 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED,
0,
@ -528,7 +528,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
I915_WRITE(BXT_DSI_PLL_ENABLE, val);
/* Timeout and fail if PLL not locked */
if (intel_wait_for_register(dev_priv,
if (intel_wait_for_register(&dev_priv->uncore,
BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED,
BXT_DSI_PLL_LOCKED,

View file

@ -498,4 +498,11 @@
INTEL_VGA_DEVICE(0x8A70, info), \
INTEL_VGA_DEVICE(0x8A53, info)
/* EHL */
#define INTEL_EHL_IDS(info) \
INTEL_VGA_DEVICE(0x4500, info), \
INTEL_VGA_DEVICE(0x4571, info), \
INTEL_VGA_DEVICE(0x4551, info), \
INTEL_VGA_DEVICE(0x4541, info)
#endif /* _I915_PCIIDS_H */

View file

@ -62,6 +62,28 @@ extern "C" {
#define I915_ERROR_UEVENT "ERROR"
#define I915_RESET_UEVENT "RESET"
/*
* i915_user_extension: Base class for defining a chain of extensions
*
* Many interfaces need to grow over time. In most cases we can simply
* extend the struct and have userspace pass in more data. Another option,
* as demonstrated by Vulkan's approach to providing extensions for forward
* and backward compatibility, is to use a list of optional structs to
* provide those extra details.
*
* The key advantage to using an extension chain is that it allows us to
* redefine the interface more easily than an ever growing struct of
* increasing complexity, and for large parts of that interface to be
* entirely optional. The downside is more pointer chasing; chasing across
* the __user boundary with pointers encapsulated inside u64.
*/
struct i915_user_extension {
__u64 next_extension;
__u32 name;
__u32 flags; /* All undefined bits must be zero. */
__u32 rsvd[4]; /* Reserved for future use; must be zero. */
};
/*
* MOCS indexes used for GPU surfaces, defining the cacheability of the
* surface data and the coherency for this data wrt. CPU vs. GPU accesses.
@ -370,6 +392,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
@ -1421,65 +1444,17 @@ struct drm_i915_gem_wait {
};
struct drm_i915_gem_context_create {
/* output: id of new context*/
__u32 ctx_id;
__u32 ctx_id; /* output: id of new context*/
__u32 pad;
};
struct drm_i915_gem_context_destroy {
__u32 ctx_id;
__u32 pad;
};
struct drm_i915_reg_read {
/*
* Register offset.
* For 64bit wide registers where the upper 32bits don't immediately
* follow the lower 32bits, the offset of the lower 32bits must
* be specified
*/
__u64 offset;
#define I915_REG_READ_8B_WA (1ul << 0)
__u64 val; /* Return value */
};
/* Known registers:
*
* Render engine timestamp - 0x2358 + 64bit - gen7+
* - Note this register returns an invalid value if using the default
* single instruction 8byte read, in order to workaround that pass
* flag I915_REG_READ_8B_WA in offset field.
*
*/
struct drm_i915_reset_stats {
__u32 ctx_id;
struct drm_i915_gem_context_create_ext {
__u32 ctx_id; /* output: id of new context*/
__u32 flags;
/* All resets since boot/module reload, for all contexts */
__u32 reset_count;
/* Number of batches lost when active in GPU, for this context */
__u32 batch_active;
/* Number of batches lost pending for execution, for this context */
__u32 batch_pending;
__u32 pad;
};
struct drm_i915_gem_userptr {
__u64 user_ptr;
__u64 user_size;
__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
/**
* Returned handle for the object.
*
* Object handles are nonzero.
*/
__u32 handle;
#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
(-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1))
__u64 extensions;
};
struct drm_i915_gem_context_param {
@ -1521,6 +1496,7 @@ struct drm_i915_gem_context_param {
*/
#define I915_CONTEXT_PARAM_RECOVERABLE 0x8
/* Must be kept compact -- no holes and well documented */
__u64 value;
};
@ -1583,6 +1559,96 @@ struct drm_i915_gem_context_param_sseu {
__u32 rsvd;
};
struct drm_i915_gem_context_create_ext_setparam {
#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
struct i915_user_extension base;
struct drm_i915_gem_context_param param;
};
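Putting the pieces together, a userspace caller can chain a setparam extension into context creation roughly as follows. This is a hedged sketch: drmIoctl() comes from libdrm, fd is assumed to be an open render node, and error handling is elided.

#include <stdint.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Sketch: create a context with RECOVERABLE cleared at creation time. */
static uint32_t create_unrecoverable_ctx(int fd)
{
	struct drm_i915_gem_context_create_ext_setparam setparam = {
		.base = {
			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
			.next_extension = 0, /* end of the chain */
		},
		.param = {
			.param = I915_CONTEXT_PARAM_RECOVERABLE,
			.value = 0,
		},
	};
	struct drm_i915_gem_context_create_ext create = {
		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
		.extensions = (uintptr_t)&setparam,
	};

	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
	return create.ctx_id;
}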
struct drm_i915_gem_context_destroy {
__u32 ctx_id;
__u32 pad;
};
/*
* DRM_I915_GEM_VM_CREATE -
*
* Create a new virtual memory address space (ppGTT) for use within a context
* on the same file. Extensions can be provided to configure exactly how the
* address space is setup upon creation.
*
* The id of the new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
* returned in the outparam @vm_id.
*
* No flags are currently defined; all bits are reserved and must be zero.
*
* An extension chain may be provided, starting with @extensions and terminated
* by @next_extension being 0. Currently, no extensions are defined.
*
* DRM_I915_GEM_VM_DESTROY -
*
* Destroys a previously created VM id, specified in @vm_id.
*
* No extensions or flags are currently allowed, so both must be zero.
*/
struct drm_i915_gem_vm_control {
__u64 extensions;
__u32 flags;
__u32 vm_id;
};
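A minimal userspace sketch of the two ioctls described above; note that the DRM_IOCTL_I915_GEM_VM_CREATE/_DESTROY numbers themselves are not part of this diff and are assumed here to be defined alongside the struct in i915_drm.h:

struct drm_i915_gem_vm_control ctl = {}; /* no flags, no extensions */

if (drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl) == 0) {
	/* ctl.vm_id is now usable with I915_CONTEXT_PARAM_VM ... */
	drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &ctl);
}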
struct drm_i915_reg_read {
/*
* Register offset.
* For 64bit wide registers where the upper 32bits don't immediately
* follow the lower 32bits, the offset of the lower 32bits must
* be specified
*/
__u64 offset;
#define I915_REG_READ_8B_WA (1ul << 0)
__u64 val; /* Return value */
};
/* Known registers:
*
* Render engine timestamp - 0x2358 + 64bit - gen7+
* - Note this register returns an invalid value if using the default
* single instruction 8byte read, in order to workaround that pass
* flag I915_REG_READ_8B_WA in offset field.
*
*/
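For instance, userspace can sample the render engine timestamp through this interface by passing the documented workaround flag in the offset field. A small fragment, assuming drmIoctl() from libdrm, an open DRM fd, and the usual includes:

struct drm_i915_reg_read rd = {
	.offset = 0x2358 | I915_REG_READ_8B_WA, /* render timestamp, gen7+ */
};

if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rd) == 0)
	printf("render timestamp: %llu\n", (unsigned long long)rd.val);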
struct drm_i915_reset_stats {
__u32 ctx_id;
__u32 flags;
/* All resets since boot/module reload, for all contexts */
__u32 reset_count;
/* Number of batches lost when active in GPU, for this context */
__u32 batch_active;
/* Number of batches lost pending for execution, for this context */
__u32 batch_pending;
__u32 pad;
};
struct drm_i915_gem_userptr {
__u64 user_ptr;
__u64 user_size;
__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
/**
* Returned handle for the object.
*
* Object handles are nonzero.
*/
__u32 handle;
};
enum drm_i915_oa_format {
I915_OA_FORMAT_A13 = 1, /* HSW only */
I915_OA_FORMAT_A29, /* HSW only */