Merge tag 'drm-intel-next-2019-10-07' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

UAPI Changes:
- Never allow userptr into the mappable GGTT (Chris)
  No existing users. Keep anyone from even trying,
  to spare us a deadlock scenario.
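  A rough sketch of the idea (not the exact patch): userptr objects
  are tagged as never-GGTT at creation, and the GGTT pinning path
  rejects them early, along the lines of

    if (i915_gem_object_never_bind_ggtt(obj))
            return ERR_PTR(-ENODEV);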

Cross-subsystem Changes:

Core Changes:

Driver Changes:

- Eliminate struct_mutex use as BKL! (Chris)
  It is now only used for execbuf serialisation.
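  As an illustrative sketch of the end state (not the exact code),
  submission is the only path still holding the lock:

    mutex_lock(&i915->drm.struct_mutex);
    err = eb_submit(&eb);
    mutex_unlock(&i915->drm.struct_mutex);

  while object management, GGTT binding and request retirement use
  their own fine-grained locks.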

- Initialize DDI TC and TBT ports (D-I) on Tigerlake (Lucas)
- Fix DKL link training for 2.7GHz and 1.62GHz (Jose)
- Add Tigerlake DKL PHY programming sequences (Clinton)
- Add Tigerlake Thunderbolt PLL divider values (Imre)

- Use helpers for drm_mm_node booleans (Chris)
- Restrict L3 remapping sysfs interface to dwords (Chris)
- Fix audio power up sequence for gen10+ display (Kai)
- Skip redundant execlist resubmission (Chris)
- Only unwedge if we can reset GPU first (Chris)
- Initialise breadcrumb lists on the virtual engine (Chris)
- Don't rely on kernel context existing during early errors (Matt A)
- Update Icelake+ MG_DP_MODE programming table (Clinton)
- Update DMC firmware for Icelake (Anusha)
- Downgrade DP MST error after unplugging TypeC cable (Srinivasan)
- Limit MST modes based on plane size too (Ville)
- Polish intel_tv_mode_valid() (Ville)
- Fix g4x sprite scaling stride check with GTT remapping (Ville)
- Don't advertise non-existing crtcs (Ville)
- Clean up encoder->crtc_mask setup (Ville)
- Use tc_port instead of port parameter to MG registers (Jose)
- Remove static variable for aux last status (Jani)
- Implement a better i945gm vblank irq vs. C-states workaround (Ville)

- Make the object creation interface consistent (CQ)
- Rename intel_vga_msr_write() to intel_vga_reset_io_mem() (Jani, Ville)
- Eliminate previous drm_dbg/drm_err usage (Jani)
- Move gmbus setup down to intel_modeset_init() (Jani)
- Abstract all vgaarb access to intel_vga.[ch] (Jani)
- Split out i915_switcheroo.[ch] from i915_drv.c (Jani)
- Use intel_gt in has_reset* (Chris)
- Eliminate return value for i915_gem_init_early (Matt A)
- Selftest improvements (Chris)
- Update HuC firmware header version number format (Daniele)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191007134801.GA24313@jlahtine-desk.ger.corp.intel.com
Commit 97ea56540f by Dave Airlie, 2019-10-08 12:54:38 +10:00
196 files changed, 11838 insertions(+), 8027 deletions(-)


@@ -246,6 +246,15 @@ Display PLLs
.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dpll_mgr.h
:internal:
Display State Buffer
--------------------
.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dsb.c
:doc: DSB
.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dsb.c
:internal:
Memory Management and Command Submission
========================================
@@ -358,15 +367,6 @@ Batchbuffer Parsing
.. kernel-doc:: drivers/gpu/drm/i915/i915_cmd_parser.c
:internal:
Batchbuffer Pools
-----------------
.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_batch_pool.c
:doc: batch pool
.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_batch_pool.c
:internal:
User Batchbuffer Execution
--------------------------
@@ -416,31 +416,31 @@ Object Tiling IOCTLs
:doc: buffer object tiling
WOPCM
=====
-----
WOPCM Layout
------------
~~~~~~~~~~~~
.. kernel-doc:: drivers/gpu/drm/i915/intel_wopcm.c
:doc: WOPCM Layout
GuC
===
---
Firmware Layout
-------------------
~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
:doc: Firmware Layout
GuC-specific firmware loader
----------------------------
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
:internal:
GuC-based command submission
----------------------------
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
:doc: GuC-based command submission
@@ -449,7 +449,7 @@ GuC-based command submission
:internal:
GuC Address Space
-----------------
~~~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc.c
:doc: GuC Address Space


@@ -46,6 +46,7 @@ i915-y += i915_drv.o \
i915_pci.o \
i915_scatterlist.o \
i915_suspend.o \
i915_switcheroo.o \
i915_sysfs.o \
i915_utils.o \
intel_csr.o \
@@ -83,8 +84,10 @@ gt-y += \
gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
gt/intel_gt_pm_irq.o \
gt/intel_gt_requests.o \
gt/intel_hangcheck.o \
gt/intel_lrc.o \
gt/intel_rc6.o \
gt/intel_renderstate.o \
gt/intel_reset.o \
gt/intel_ringbuffer.o \
@@ -172,6 +175,7 @@ i915-y += \
display/intel_display_power.o \
display/intel_dpio_phy.o \
display/intel_dpll_mgr.o \
display/intel_dsb.o \
display/intel_fbc.o \
display/intel_fifo_underrun.o \
display/intel_frontbuffer.o \
@@ -182,7 +186,8 @@ i915-y += \
display/intel_psr.o \
display/intel_quirks.o \
display/intel_sprite.o \
display/intel_tc.o
display/intel_tc.o \
display/intel_vga.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o


@@ -199,7 +199,6 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->disable_cxsr = false;
crtc_state->update_wm_pre = false;
crtc_state->update_wm_post = false;
crtc_state->fb_changed = false;
crtc_state->fifo_changed = false;
crtc_state->wm.need_postvbl_update = false;
crtc_state->fb_bits = 0;
@@ -264,10 +263,13 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
*/
mode = PS_SCALER_MODE_NORMAL;
} else {
struct intel_plane *linked =
plane_state->planar_linked_plane;
mode = PS_SCALER_MODE_PLANAR;
if (plane_state->linked_plane)
mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id);
if (linked)
mode |= PS_PLANE_Y_SEL(linked->id);
}
} else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
mode = PS_SCALER_MODE_NORMAL;
@@ -371,6 +373,15 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
*/
if (!plane) {
struct drm_plane_state *state;
/*
* GLK+ scalers don't have a HQ mode so it
* isn't necessary to change between HQ and dyn mode
* on those platforms.
*/
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
continue;
plane = drm_plane_from_index(&dev_priv->drm, i);
state = drm_atomic_get_plane_state(drm_state, plane);
if (IS_ERR(state)) {
@@ -378,13 +389,6 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
plane->base.id);
return PTR_ERR(state);
}
/*
* the plane is added after plane checks are run,
* but since this plane is unchanged just do the
* minimum required validation.
*/
crtc_state->base.planes_changed = true;
}
intel_plane = to_intel_plane(plane);


@@ -144,6 +144,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_plane_state *new_plane_state)
{
struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane);
const struct drm_framebuffer *fb = new_plane_state->base.fb;
int ret;
new_crtc_state->active_planes &= ~BIT(plane->id);
@@ -164,11 +165,11 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->active_planes |= BIT(plane->id);
if (new_plane_state->base.visible &&
is_planar_yuv_format(new_plane_state->base.fb->format->format))
drm_format_info_is_yuv_semiplanar(fb->format))
new_crtc_state->nv12_planes |= BIT(plane->id);
if (new_plane_state->base.visible &&
new_plane_state->base.fb->format->format == DRM_FORMAT_C8)
fb->format->format == DRM_FORMAT_C8)
new_crtc_state->c8_planes |= BIT(plane->id);
if (new_plane_state->base.visible || old_plane_state->base.visible)
@@ -320,9 +321,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
if (new_plane_state->base.visible) {
intel_update_plane(plane, new_crtc_state, new_plane_state);
} else if (new_plane_state->slave) {
} else if (new_plane_state->planar_slave) {
struct intel_plane *master =
new_plane_state->linked_plane;
new_plane_state->planar_linked_plane;
/*
* We update the slave plane from this function because


@@ -560,8 +560,9 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
u32 tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
DRM_DEBUG_KMS("Disable audio codec on [ENCODER:%d:%s], pipe %c\n",
encoder->base.base.id, encoder->base.name,
pipe_name(pipe));
if (WARN_ON(port == PORT_A))
return;
@@ -609,8 +610,9 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
int len, i;
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
port_name(port), pipe_name(pipe), drm_eld_size(eld));
DRM_DEBUG_KMS("Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n",
encoder->base.base.id, encoder->base.name,
pipe_name(pipe), drm_eld_size(eld));
if (WARN_ON(port == PORT_A))
return;
@@ -850,11 +852,23 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
/* Force CDCLK to 2*BCLK as long as we need audio to be powered. */
if (dev_priv->audio_power_refcount++ == 0)
if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
if (dev_priv->audio_power_refcount++ == 0) {
if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
I915_WRITE(AUD_FREQ_CNTRL, dev_priv->audio_freq_cntrl);
DRM_DEBUG_KMS("restored AUD_FREQ_CNTRL to 0x%x\n",
dev_priv->audio_freq_cntrl);
}
/* Force CDCLK to 2*BCLK as long as we need audio powered. */
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, true);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
I915_WRITE(AUD_PIN_BUF_CTL,
(I915_READ(AUD_PIN_BUF_CTL) |
AUD_PIN_BUF_ENABLE));
}
return ret;
}
@@ -865,7 +879,7 @@ static void i915_audio_component_put_power(struct device *kdev,
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
if (--dev_priv->audio_power_refcount == 0)
if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, false);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
@@ -1114,6 +1128,12 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
return;
}
if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
dev_priv->audio_freq_cntrl = I915_READ(AUD_FREQ_CNTRL);
DRM_DEBUG_KMS("init value of AUD_FREQ_CNTRL of 0x%x\n",
dev_priv->audio_freq_cntrl);
}
dev_priv->audio_component_registered = true;
}


@@ -1833,7 +1833,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
const struct bdb_header *bdb;
u8 __iomem *bios = NULL;
if (!HAS_DISPLAY(dev_priv)) {
if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) {
DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
return;
}


@@ -1,5 +1,5 @@
/*
* Copyright © 2016 Intel Corporation
* Copyright © 2016-2019 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -35,6 +35,7 @@
#include <drm/i915_drm.h>
struct drm_i915_private;
enum port;
enum intel_backlight_type {
INTEL_BACKLIGHT_PMIC,


@@ -35,28 +35,54 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
if (ret)
return ret;
switch (val & 0xf) {
case 0:
qi->dram_type = INTEL_DRAM_DDR4;
break;
case 1:
qi->dram_type = INTEL_DRAM_DDR3;
break;
case 2:
qi->dram_type = INTEL_DRAM_LPDDR3;
break;
case 3:
qi->dram_type = INTEL_DRAM_LPDDR3;
break;
default:
MISSING_CASE(val & 0xf);
break;
if (IS_GEN(dev_priv, 12)) {
switch (val & 0xf) {
case 0:
qi->dram_type = INTEL_DRAM_DDR4;
break;
case 3:
qi->dram_type = INTEL_DRAM_LPDDR4;
break;
case 4:
qi->dram_type = INTEL_DRAM_DDR3;
break;
case 5:
qi->dram_type = INTEL_DRAM_LPDDR3;
break;
default:
MISSING_CASE(val & 0xf);
break;
}
} else if (IS_GEN(dev_priv, 11)) {
switch (val & 0xf) {
case 0:
qi->dram_type = INTEL_DRAM_DDR4;
break;
case 1:
qi->dram_type = INTEL_DRAM_DDR3;
break;
case 2:
qi->dram_type = INTEL_DRAM_LPDDR3;
break;
case 3:
qi->dram_type = INTEL_DRAM_LPDDR4;
break;
default:
MISSING_CASE(val & 0xf);
break;
}
} else {
MISSING_CASE(INTEL_GEN(dev_priv));
qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */
}
qi->num_channels = (val & 0xf0) >> 4;
qi->num_points = (val & 0xf00) >> 8;
qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
if (IS_GEN(dev_priv, 12))
qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16;
else if (IS_GEN(dev_priv, 11))
qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
return 0;
}
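/*
 * Worked example (illustrative values, not from the spec): on gen11 a
 * pcode reply of val == 0x213 decodes to dram_type = INTEL_DRAM_LPDDR4
 * (0x213 & 0xf == 3), num_channels = 1 ((0x213 & 0xf0) >> 4) and
 * num_points = 2 ((0x213 & 0xf00) >> 8), giving t_bl = 8 for the
 * non-DDR4 case.
 */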
@@ -132,20 +158,25 @@ static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
}
struct intel_sa_info {
u8 deburst, mpagesize, deprogbwlimit, displayrtids;
u16 displayrtids;
u8 deburst, deprogbwlimit;
};
static const struct intel_sa_info icl_sa_info = {
.deburst = 8,
.mpagesize = 16,
.deprogbwlimit = 25, /* GB/s */
.displayrtids = 128,
};
static int icl_get_bw_info(struct drm_i915_private *dev_priv)
static const struct intel_sa_info tgl_sa_info = {
.deburst = 16,
.deprogbwlimit = 34, /* GB/s */
.displayrtids = 256,
};
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
const struct intel_sa_info *sa = &icl_sa_info;
bool is_y_tile = true; /* assume y tile may be used */
int num_channels;
int deinterleave;
@@ -233,14 +264,16 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
if (IS_GEN(dev_priv, 11))
icl_get_bw_info(dev_priv);
if (IS_GEN(dev_priv, 12))
icl_get_bw_info(dev_priv, &tgl_sa_info);
else if (IS_GEN(dev_priv, 11))
icl_get_bw_info(dev_priv, &icl_sa_info);
}
static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
int num_planes)
{
if (IS_GEN(dev_priv, 11))
if (INTEL_GEN(dev_priv) >= 11)
/*
* FIXME with SAGV disabled maybe we can assume
* point 1 will always be used? Seems to match

File diff suppressed because it is too large.


@@ -15,6 +15,13 @@ struct intel_atomic_state;
struct intel_cdclk_state;
struct intel_crtc_state;
struct intel_cdclk_vals {
u16 refclk;
u32 cdclk;
u8 divider; /* CD2X divider * 2 */
u8 ratio;
};
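/*
 * Illustrative entry (assumed shape, not copied from the tables):
 * { .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 },
 * i.e. cdclk = refclk * ratio / divider = 19200 * 18 / 2 = 172800 kHz.
 * Storing twice the CD2X divider keeps the 1.5 divider integral.
 */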
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
void intel_cdclk_init(struct drm_i915_private *i915);
void intel_cdclk_uninit(struct drm_i915_private *i915);
@@ -22,13 +29,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b);
bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b);
bool intel_cdclk_changed(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b);
void intel_cdclk_swap_state(struct intel_atomic_state *state);
void
intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
@@ -42,5 +44,6 @@
enum pipe pipe);
void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
const char *context);
int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
#endif /* __INTEL_CDCLK_H__ */


@@ -42,6 +42,21 @@
#define LEGACY_LUT_LENGTH 256
/*
* ILK+ csc matrix:
*
* |R/Cr| | c0 c1 c2 | ( |R/Cr| |preoff0| ) |postoff0|
* |G/Y | = | c3 c4 c5 | x ( |G/Y | + |preoff1| ) + |postoff1|
* |B/Cb| | c6 c7 c8 | ( |B/Cb| |preoff2| ) |postoff2|
*
* ILK/SNB don't have explicit post offsets, and instead
* CSC_MODE_YUV_TO_RGB and CSC_BLACK_SCREEN_OFFSET are used:
* CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=0 -> 1/2, 0, 1/2
* CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/2, 1/16, 1/2
* CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=0 -> 0, 0, 0
* CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/16, 1/16, 1/16
*/
/*
* Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
* format). This macro takes the coefficient we want transformed and the
@@ -59,37 +74,38 @@
#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255)
/* Nop pre/post offsets */
static const u16 ilk_csc_off_zero[3] = {};
/* Identity matrix */
static const u16 ilk_csc_coeff_identity[9] = {
ILK_CSC_COEFF_1_0, 0, 0,
0, ILK_CSC_COEFF_1_0, 0,
0, 0, ILK_CSC_COEFF_1_0,
};
/* Limited range RGB post offsets */
static const u16 ilk_csc_postoff_limited_range[3] = {
ILK_CSC_POSTOFF_LIMITED_RANGE,
ILK_CSC_POSTOFF_LIMITED_RANGE,
ILK_CSC_POSTOFF_LIMITED_RANGE,
};
/* Full range RGB -> limited range RGB matrix */
static const u16 ilk_csc_coeff_limited_range[9] = {
ILK_CSC_COEFF_LIMITED_RANGE, 0, 0,
0, ILK_CSC_COEFF_LIMITED_RANGE, 0,
0, 0, ILK_CSC_COEFF_LIMITED_RANGE,
};
/*
* These values are direct register values specified in the Bspec,
* for RGB->YUV conversion matrix (colorspace BT709)
*/
/* BT.709 full range RGB -> limited range YCbCr matrix */
static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = {
0x1e08, 0x9cc0, 0xb528,
0x2ba8, 0x09d8, 0x37e8,
0xbce8, 0x9ad8, 0x1e08,
};
/* Post offset values for RGB->YCBCR conversion */
/* Limited range YCbCr post offsets */
static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = {
0x0800, 0x0100, 0x0800,
};
@@ -611,12 +627,13 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
/* Program the max register to clamp values > 1.0. */
I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
/*
* Program the gc max 2 register to clamp values > 1.0.
@@ -624,10 +641,15 @@ static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
* from 3.0 to 7.0
*/
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16);
I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16);
I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16);
intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 0),
1 << 16);
intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 1),
1 << 16);
intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 2),
1 << 16);
}
intel_dsb_put(dsb);
}
static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
@@ -787,78 +809,83 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state,
const struct drm_color_lut *color)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
/* Fixme: LUT entries are 16 bit only, so we can prog 0xFFFF max */
I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), color->red);
I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), color->green);
I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), color->blue);
intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 0), color->red);
intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 1), color->green);
intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 2), color->blue);
intel_dsb_put(dsb);
}
static void
icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
const struct drm_color_lut *lut = blob->data;
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
u32 i;
/*
* Every entry in the multi-segment LUT is corresponding to a superfine
* segment step which is 1/(8 * 128 * 256).
* Program Super Fine segment (let's call it seg1)...
*
* Superfine segment has 9 entries, corresponding to values
* 0, 1/(8 * 128 * 256), 2/(8 * 128 * 256) .... 8/(8 * 128 * 256).
* Super Fine segment's step is 1/(8 * 128 * 256) and it has
* 9 entries, corresponding to values 0, 1/(8 * 128 * 256),
* 2/(8 * 128 * 256) ... 8/(8 * 128 * 256).
*/
I915_WRITE(PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
intel_dsb_reg_write(dsb, PREC_PAL_MULTI_SEG_INDEX(pipe),
PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < 9; i++) {
const struct drm_color_lut *entry = &lut[i];
I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
ilk_lut_12p4_ldw(entry));
I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
ilk_lut_12p4_udw(entry));
intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
ilk_lut_12p4_ldw(entry));
intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
ilk_lut_12p4_udw(entry));
}
intel_dsb_put(dsb);
}
static void
icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
const struct drm_color_lut *lut = blob->data;
const struct drm_color_lut *entry;
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
u32 i;
/*
*
* Program Fine segment (let's call it seg2)...
*
* Fine segment's step is 1/(128 * 256) ie 1/(128 * 256), 2/(128*256)
* ... 256/(128*256). So in order to program fine segment of LUT we
* need to pick every 8'th entry in LUT, and program 256 indexes.
* Fine segment's step is 1/(128 * 256) i.e. 1/(128 * 256), 2/(128 * 256)
* ... 256/(128 * 256). So in order to program fine segment of LUT we
* need to pick every 8th entry in the LUT, and program 256 indexes.
*
* PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1],
* with seg2[0] being unused by the hardware.
* seg2[0] being unused by the hardware.
*/
I915_WRITE(PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
intel_dsb_reg_write(dsb, PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
for (i = 1; i < 257; i++) {
entry = &lut[i * 8];
I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
ilk_lut_12p4_ldw(entry));
intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
ilk_lut_12p4_udw(entry));
}
/*
* Program Coarse segment (let's call it seg3)...
*
* Coarse segment's starts from index 0 and it's step is 1/256 ie 0,
* 1/256, 2/256 ...256/256. As per the description of each entry in LUT
* Coarse segment starts from index 0 and it's step is 1/256 ie 0,
* 1/256, 2/256 ... 256/256. As per the description of each entry in LUT
* above, we need to pick every (8 * 128)th entry in LUT, and
* program 256 of those.
*
@@ -868,20 +895,24 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
*/
for (i = 0; i < 256; i++) {
entry = &lut[i * 8 * 128];
I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
ilk_lut_12p4_ldw(entry));
intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
ilk_lut_12p4_udw(entry));
}
/* The last entry in the LUT is to be programmed in GCMAX */
entry = &lut[256 * 8 * 128];
icl_load_gcmax(crtc_state, entry);
ivb_load_lut_ext_max(crtc);
intel_dsb_put(dsb);
}
static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_dsb *dsb = intel_dsb_get(crtc);
if (crtc_state->base.degamma_lut)
glk_load_degamma_lut(crtc_state);
@@ -890,16 +921,17 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
case GAMMA_MODE_MODE_8BIT:
i9xx_load_luts(crtc_state);
break;
case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
icl_program_gamma_superfine_segment(crtc_state);
icl_program_gamma_multi_segment(crtc_state);
break;
default:
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc);
}
intel_dsb_commit(dsb);
intel_dsb_put(dsb);
}
static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
@@ -1197,6 +1229,21 @@ static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state)
return GAMMA_MODE_MODE_10BIT;
}
static u32 ilk_csc_mode(const struct intel_crtc_state *crtc_state)
{
/*
* CSC comes after the LUT in RGB->YCbCr mode.
* RGB->YCbCr needs the limited range offsets added to
* the output. RGB limited range output is handled by
* the hw automagically elsewhere.
*/
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
return CSC_BLACK_SCREEN_OFFSET;
return CSC_MODE_YUV_TO_RGB |
CSC_POSITION_BEFORE_GAMMA;
}
static int ilk_color_check(struct intel_crtc_state *crtc_state)
{
int ret;
@@ -1210,15 +1257,15 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state)
!crtc_state->c8_planes;
/*
* We don't expose the ctm on ilk/snb currently,
* nor do we enable YCbCr output. Also RGB limited
* range output is handled by the hw automagically.
* We don't expose the ctm on ilk/snb currently, also RGB
* limited range output is handled by the hw automagically.
*/
crtc_state->csc_enable = false;
crtc_state->csc_enable =
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB;
crtc_state->gamma_mode = ilk_gamma_mode(crtc_state);
crtc_state->csc_mode = 0;
crtc_state->csc_mode = ilk_csc_mode(crtc_state);
ret = intel_color_add_affected_planes(crtc_state);
if (ret)
@@ -1371,6 +1418,382 @@ static int icl_color_check(struct intel_crtc_state *crtc_state)
return 0;
}
static int i9xx_gamma_precision(const struct intel_crtc_state *crtc_state)
{
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
return 8;
case GAMMA_MODE_MODE_10BIT:
return 16;
default:
MISSING_CASE(crtc_state->gamma_mode);
return 0;
}
}
static int ilk_gamma_precision(const struct intel_crtc_state *crtc_state)
{
if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
return 0;
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
return 8;
case GAMMA_MODE_MODE_10BIT:
return 10;
default:
MISSING_CASE(crtc_state->gamma_mode);
return 0;
}
}
static int chv_gamma_precision(const struct intel_crtc_state *crtc_state)
{
if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
return 10;
else
return i9xx_gamma_precision(crtc_state);
}
static int glk_gamma_precision(const struct intel_crtc_state *crtc_state)
{
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
return 8;
case GAMMA_MODE_MODE_10BIT:
return 10;
default:
MISSING_CASE(crtc_state->gamma_mode);
return 0;
}
}
int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!crtc_state->gamma_enable)
return 0;
if (HAS_GMCH(dev_priv)) {
if (IS_CHERRYVIEW(dev_priv))
return chv_gamma_precision(crtc_state);
else
return i9xx_gamma_precision(crtc_state);
} else {
if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
return glk_gamma_precision(crtc_state);
else if (IS_IRONLAKE(dev_priv))
return ilk_gamma_precision(crtc_state);
}
return 0;
}
static bool err_check(struct drm_color_lut *lut1,
struct drm_color_lut *lut2, u32 err)
{
return ((abs((long)lut2->red - lut1->red)) <= err) &&
((abs((long)lut2->blue - lut1->blue)) <= err) &&
((abs((long)lut2->green - lut1->green)) <= err);
}
static bool intel_color_lut_entry_equal(struct drm_color_lut *lut1,
struct drm_color_lut *lut2,
int lut_size, u32 err)
{
int i;
for (i = 0; i < lut_size; i++) {
if (!err_check(&lut1[i], &lut2[i], err))
return false;
}
return true;
}
bool intel_color_lut_equal(struct drm_property_blob *blob1,
struct drm_property_blob *blob2,
u32 gamma_mode, u32 bit_precision)
{
struct drm_color_lut *lut1, *lut2;
int lut_size1, lut_size2;
u32 err;
if (!blob1 != !blob2)
return false;
if (!blob1)
return true;
lut_size1 = drm_color_lut_size(blob1);
lut_size2 = drm_color_lut_size(blob2);
/* check sw and hw lut size */
switch (gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
case GAMMA_MODE_MODE_10BIT:
if (lut_size1 != lut_size2)
return false;
break;
default:
MISSING_CASE(gamma_mode);
return false;
}
lut1 = blob1->data;
lut2 = blob2->data;
err = 0xffff >> bit_precision;
/* check sw and hw lut entry to be equal */
switch (gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
case GAMMA_MODE_MODE_10BIT:
if (!intel_color_lut_entry_equal(lut1, lut2,
lut_size2, err))
return false;
break;
default:
MISSING_CASE(gamma_mode);
return false;
}
return true;
}
/* convert hw value with given bit_precision to lut property val */
static u32 intel_color_lut_pack(u32 val, u32 bit_precision)
{
u32 max = 0xffff >> (16 - bit_precision);
val = clamp_val(val, 0, max);
if (bit_precision < 16)
val <<= 16 - bit_precision;
return val;
}
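/*
 * Worked example (illustrative): a 10-bit hardware value of 0x3ff packs
 * to intel_color_lut_pack(0x3ff, 10) == 0x3ff << 6 == 0xffc0, so the top
 * of the 10-bit range lands near, not exactly at, 0xffff. This is why
 * intel_color_lut_equal() above compares entries with an error budget of
 * 0xffff >> bit_precision rather than demanding exact equality.
 */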
static struct drm_property_blob *
i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *blob_data;
u32 i, val;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
NULL);
if (IS_ERR(blob))
return NULL;
blob_data = blob->data;
for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
if (HAS_GMCH(dev_priv))
val = I915_READ(PALETTE(pipe, i));
else
val = I915_READ(LGC_PALETTE(pipe, i));
blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
LGC_PALETTE_RED_MASK, val), 8);
blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
LGC_PALETTE_GREEN_MASK, val), 8);
blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
LGC_PALETTE_BLUE_MASK, val), 8);
}
return blob;
}
static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
{
crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
}
static struct drm_property_blob *
i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *blob_data;
u32 i, val1, val2;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
NULL);
if (IS_ERR(blob))
return NULL;
blob_data = blob->data;
for (i = 0; i < lut_size - 1; i++) {
val1 = I915_READ(PALETTE(pipe, 2 * i + 0));
val2 = I915_READ(PALETTE(pipe, 2 * i + 1));
blob_data[i].red = REG_FIELD_GET(PALETTE_RED_MASK, val2) << 8 |
REG_FIELD_GET(PALETTE_RED_MASK, val1);
blob_data[i].green = REG_FIELD_GET(PALETTE_GREEN_MASK, val2) << 8 |
REG_FIELD_GET(PALETTE_GREEN_MASK, val1);
blob_data[i].blue = REG_FIELD_GET(PALETTE_BLUE_MASK, val2) << 8 |
REG_FIELD_GET(PALETTE_BLUE_MASK, val1);
}
blob_data[i].red = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
I915_READ(PIPEGCMAX(pipe, 0)));
blob_data[i].green = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
I915_READ(PIPEGCMAX(pipe, 1)));
blob_data[i].blue = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
I915_READ(PIPEGCMAX(pipe, 2)));
return blob;
}
static void i965_read_luts(struct intel_crtc_state *crtc_state)
{
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
else
crtc_state->base.gamma_lut = i965_read_lut_10p6(crtc_state);
}
static struct drm_property_blob *
chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *blob_data;
u32 i, val;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
NULL);
if (IS_ERR(blob))
return NULL;
blob_data = blob->data;
for (i = 0; i < lut_size; i++) {
val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 0));
blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
CGM_PIPE_GAMMA_GREEN_MASK, val), 10);
blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
CGM_PIPE_GAMMA_BLUE_MASK, val), 10);
val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 1));
blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
CGM_PIPE_GAMMA_RED_MASK, val), 10);
}
return blob;
}
static void chv_read_luts(struct intel_crtc_state *crtc_state)
{
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
else
crtc_state->base.gamma_lut = chv_read_cgm_lut(crtc_state);
}
static struct drm_property_blob *
ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *blob_data;
u32 i, val;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
NULL);
if (IS_ERR(blob))
return NULL;
blob_data = blob->data;
for (i = 0; i < lut_size; i++) {
val = I915_READ(PREC_PALETTE(pipe, i));
blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
PREC_PALETTE_RED_MASK, val), 10);
blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
PREC_PALETTE_GREEN_MASK, val), 10);
blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
PREC_PALETTE_BLUE_MASK, val), 10);
}
return blob;
}
static void ilk_read_luts(struct intel_crtc_state *crtc_state)
{
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
else
crtc_state->base.gamma_lut = ilk_read_lut_10(crtc_state);
}
static struct drm_property_blob *
glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int hw_lut_size = ivb_lut_10_size(prec_index);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *blob_data;
u32 i, val;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * hw_lut_size,
NULL);
if (IS_ERR(blob))
return NULL;
blob_data = blob->data;
I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < hw_lut_size; i++) {
val = I915_READ(PREC_PAL_DATA(pipe));
blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
PREC_PAL_DATA_RED_MASK, val), 10);
blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
PREC_PAL_DATA_GREEN_MASK, val), 10);
blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
PREC_PAL_DATA_BLUE_MASK, val), 10);
}
I915_WRITE(PREC_PAL_INDEX(pipe), 0);
return blob;
}
static void glk_read_luts(struct intel_crtc_state *crtc_state)
{
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
else
crtc_state->base.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0));
}
void intel_color_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1383,14 +1806,17 @@ void intel_color_init(struct intel_crtc *crtc)
dev_priv->display.color_check = chv_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = chv_load_luts;
dev_priv->display.read_luts = chv_read_luts;
} else if (INTEL_GEN(dev_priv) >= 4) {
dev_priv->display.color_check = i9xx_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = i965_load_luts;
dev_priv->display.read_luts = i965_read_luts;
} else {
dev_priv->display.color_check = i9xx_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = i9xx_load_luts;
dev_priv->display.read_luts = i9xx_read_luts;
}
} else {
if (INTEL_GEN(dev_priv) >= 11)
@@ -1409,16 +1835,19 @@ void intel_color_init(struct intel_crtc *crtc)
else
dev_priv->display.color_commit = ilk_color_commit;
if (INTEL_GEN(dev_priv) >= 11)
if (INTEL_GEN(dev_priv) >= 11) {
dev_priv->display.load_luts = icl_load_luts;
else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
} else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
dev_priv->display.load_luts = glk_load_luts;
else if (INTEL_GEN(dev_priv) >= 8)
dev_priv->display.read_luts = glk_read_luts;
} else if (INTEL_GEN(dev_priv) >= 8) {
dev_priv->display.load_luts = bdw_load_luts;
else if (INTEL_GEN(dev_priv) >= 7)
} else if (INTEL_GEN(dev_priv) >= 7) {
dev_priv->display.load_luts = ivb_load_luts;
else
} else {
dev_priv->display.load_luts = ilk_load_luts;
dev_priv->display.read_luts = ilk_read_luts;
}
}
drm_crtc_enable_color_mgmt(&crtc->base,


@@ -6,13 +6,20 @@
#ifndef __INTEL_COLOR_H__
#define __INTEL_COLOR_H__
#include <linux/types.h>
struct intel_crtc_state;
struct intel_crtc;
struct drm_property_blob;
void intel_color_init(struct intel_crtc *crtc);
int intel_color_check(struct intel_crtc_state *crtc_state);
void intel_color_commit(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
void intel_color_get_config(struct intel_crtc_state *crtc_state);
int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state);
bool intel_color_lut_equal(struct drm_property_blob *blob1,
struct drm_property_blob *blob2,
u32 gamma_mode, u32 bit_precision);
#endif /* __INTEL_COLOR_H__ */


@@ -994,9 +994,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
if (IS_I830(dev_priv))
crt->base.crtc_mask = (1 << 0);
crt->base.crtc_mask = BIT(PIPE_A);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
crt->base.crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
if (IS_GEN(dev_priv, 2))
connector->interlace_allowed = 0;


@@ -586,6 +586,26 @@ static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = {
{ 0x0, 0x00, 0x00 }, /* 3 0 */
};
struct tgl_dkl_phy_ddi_buf_trans {
u32 dkl_vswing_control;
u32 dkl_preshoot_control;
u32 dkl_de_emphasis_control;
};
static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_ddi_translations[] = {
/* VS pre-emp Non-trans mV Pre-emph dB */
{ 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
{ 0x5, 0x0, 0x03 }, /* 0 1 400mV 3.5 dB */
{ 0x2, 0x0, 0x0b }, /* 0 2 400mV 6 dB */
{ 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */
{ 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
{ 0x2, 0x0, 0x03 }, /* 1 1 600mV 3.5 dB */
{ 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
{ 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
{ 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
{ 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
};
static const struct ddi_buf_trans *
bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
@@ -872,7 +892,14 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
if (INTEL_GEN(dev_priv) >= 11) {
if (INTEL_GEN(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
default_entry = n_entries - 1;
} else if (INTEL_GEN(dev_priv) == 11) {
if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
@@ -1049,6 +1076,8 @@ static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
case DPLL_ID_ICL_MGPLL2:
case DPLL_ID_ICL_MGPLL3:
case DPLL_ID_ICL_MGPLL4:
case DPLL_ID_TGL_MGPLL5:
case DPLL_ID_TGL_MGPLL6:
return DDI_CLK_SEL_MG;
}
}
@@ -1413,11 +1442,30 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
ref_clock = dev_priv->cdclk.hw.ref;
m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
m2_frac = (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
(pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0;
if (INTEL_GEN(dev_priv) >= 12) {
m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
m2_frac = pll_state->mg_pll_bias &
DKL_PLL_BIAS_FBDIV_FRAC_MASK;
m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
} else {
m2_frac = 0;
}
} else {
m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
m2_frac = pll_state->mg_pll_div0 &
MG_PLL_DIV0_FBDIV_FRAC_MASK;
m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
} else {
m2_frac = 0;
}
}
switch (pll_state->mg_clktop2_hsclkctl &
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
@@ -1706,9 +1754,6 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
temp = TRANS_MSA_SYNC_CLK;
if (crtc_state->limited_color_range)
temp |= TRANS_MSA_CEA_RANGE;
switch (crtc_state->pipe_bpp) {
case 18:
temp |= TRANS_MSA_6_BPC;
@@ -1727,13 +1772,22 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
break;
}
/* nonsense combination */
WARN_ON(crtc_state->limited_color_range &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range)
temp |= TRANS_MSA_CEA_RANGE;
/*
* As per DP 1.2 spec section 2.3.4.3 while sending
* YCBCR 444 signals we should program MSA MISC1/0 fields with
* colorspace information. The output colorspace encoding is BT601.
* colorspace information.
*/
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR;
temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR |
TRANS_MSA_YCBCR_BT709;
/*
* As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
* of Color Encoding Format and Content Color Gamut] while sending
@@ -1761,7 +1815,14 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
/*
* Returns the TRANS_DDI_FUNC_CTL value based on CRTC state.
*
* Only intended to be used by intel_ddi_enable_transcoder_func() and
* intel_ddi_config_transcoder_func().
*/
static u32
intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
@@ -1845,6 +1906,34 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
return temp;
}
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 temp;
temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
/*
* Same as intel_ddi_enable_transcoder_func(), but it does not set the enable
* bit.
*/
static void
intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 temp;
temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
temp &= ~TRANS_DDI_FUNC_ENABLE;
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
@@ -2045,18 +2134,20 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (!*pipe_mask)
DRM_DEBUG_KMS("No pipe for ddi port %c found\n",
port_name(port));
DRM_DEBUG_KMS("No pipe for [ENCODER:%d:%s] found\n",
encoder->base.base.id, encoder->base.name);
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n",
port_name(port), *pipe_mask);
DRM_DEBUG_KMS("Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
encoder->base.base.id, encoder->base.name,
*pipe_mask);
*pipe_mask = BIT(ffs(*pipe_mask) - 1);
}
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n",
port_name(port), *pipe_mask, mst_pipe_mask);
DRM_DEBUG_KMS("Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
encoder->base.base.id, encoder->base.name,
*pipe_mask, mst_pipe_mask);
else
*is_dp_mst = mst_pipe_mask;
@@ -2066,8 +2157,9 @@ out:
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
DRM_ERROR("Port %c enabled but PHY powered down? "
"(PHY_CTL %08x)\n", port_name(port), tmp);
DRM_ERROR("[ENCODER:%d:%s] enabled but PHY powered down? "
"(PHY_CTL %08x)\n", encoder->base.base.id,
encoder->base.name, tmp);
}
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
@@ -2269,7 +2361,13 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
enum phy phy = intel_port_to_phy(dev_priv, port);
int n_entries;
if (INTEL_GEN(dev_priv) >= 11) {
if (INTEL_GEN(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
else
n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
} else if (INTEL_GEN(dev_priv) == 11) {
if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
@@ -2583,7 +2681,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
u32 level)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
const struct icl_mg_phy_ddi_buf_trans *ddi_translations;
u32 n_entries, val;
int ln;
@@ -2599,33 +2697,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
val = I915_READ(MG_TX1_LINK_PARAMS(ln, port));
val = I915_READ(MG_TX1_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
I915_WRITE(MG_TX1_LINK_PARAMS(ln, port), val);
I915_WRITE(MG_TX1_LINK_PARAMS(ln, tc_port), val);
val = I915_READ(MG_TX2_LINK_PARAMS(ln, port));
val = I915_READ(MG_TX2_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
I915_WRITE(MG_TX2_LINK_PARAMS(ln, port), val);
I915_WRITE(MG_TX2_LINK_PARAMS(ln, tc_port), val);
}
/* Program MG_TX_SWINGCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
val = I915_READ(MG_TX1_SWINGCTRL(ln, port));
val = I915_READ(MG_TX1_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
I915_WRITE(MG_TX1_SWINGCTRL(ln, port), val);
I915_WRITE(MG_TX1_SWINGCTRL(ln, tc_port), val);
val = I915_READ(MG_TX2_SWINGCTRL(ln, port));
val = I915_READ(MG_TX2_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
I915_WRITE(MG_TX2_SWINGCTRL(ln, port), val);
I915_WRITE(MG_TX2_SWINGCTRL(ln, tc_port), val);
}
/* Program MG_TX_DRVCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
val = I915_READ(MG_TX1_DRVCTRL(ln, port));
val = I915_READ(MG_TX1_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2633,9 +2731,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
I915_WRITE(MG_TX1_DRVCTRL(ln, port), val);
I915_WRITE(MG_TX1_DRVCTRL(ln, tc_port), val);
val = I915_READ(MG_TX2_DRVCTRL(ln, port));
val = I915_READ(MG_TX2_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2643,7 +2741,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
I915_WRITE(MG_TX2_DRVCTRL(ln, port), val);
I915_WRITE(MG_TX2_DRVCTRL(ln, tc_port), val);
/* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
}
@@ -2654,17 +2752,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
val = I915_READ(MG_CLKHUB(ln, port));
val = I915_READ(MG_CLKHUB(ln, tc_port));
if (link_clock < 300000)
val |= CFG_LOW_RATE_LKREN_EN;
else
val &= ~CFG_LOW_RATE_LKREN_EN;
I915_WRITE(MG_CLKHUB(ln, port), val);
I915_WRITE(MG_CLKHUB(ln, tc_port), val);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
val = I915_READ(MG_TX1_DCC(ln, port));
val = I915_READ(MG_TX1_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2672,9 +2770,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
I915_WRITE(MG_TX1_DCC(ln, port), val);
I915_WRITE(MG_TX1_DCC(ln, tc_port), val);
val = I915_READ(MG_TX2_DCC(ln, port));
val = I915_READ(MG_TX2_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2682,18 +2780,18 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
I915_WRITE(MG_TX2_DCC(ln, port), val);
I915_WRITE(MG_TX2_DCC(ln, tc_port), val);
}
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
val = I915_READ(MG_TX1_PISO_READLOAD(ln, port));
val = I915_READ(MG_TX1_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
I915_WRITE(MG_TX1_PISO_READLOAD(ln, port), val);
I915_WRITE(MG_TX1_PISO_READLOAD(ln, tc_port), val);
val = I915_READ(MG_TX2_PISO_READLOAD(ln, port));
val = I915_READ(MG_TX2_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
I915_WRITE(MG_TX2_PISO_READLOAD(ln, port), val);
I915_WRITE(MG_TX2_PISO_READLOAD(ln, tc_port), val);
}
}
@@ -2711,6 +2809,62 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
}
static void
tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
u32 level)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations;
u32 n_entries, val, ln, dpcnt_mask, dpcnt_val;
n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
ddi_translations = tgl_dkl_phy_ddi_translations;
if (level >= n_entries)
level = n_entries - 1;
dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
DKL_TX_DE_EMPAHSIS_COEFF_MASK |
DKL_TX_VSWING_CONTROL_MASK);
dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations[level].dkl_vswing_control);
dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations[level].dkl_de_emphasis_control);
dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations[level].dkl_preshoot_control);
for (ln = 0; ln < 2; ln++) {
I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
/* All the registers are RMW */
val = I915_READ(DKL_TX_DPCNTL0(tc_port));
val &= ~dpcnt_mask;
val |= dpcnt_val;
I915_WRITE(DKL_TX_DPCNTL0(tc_port), val);
val = I915_READ(DKL_TX_DPCNTL1(tc_port));
val &= ~dpcnt_mask;
val |= dpcnt_val;
I915_WRITE(DKL_TX_DPCNTL1(tc_port), val);
val = I915_READ(DKL_TX_DPCNTL2(tc_port));
val &= ~DKL_TX_DP20BITMODE;
I915_WRITE(DKL_TX_DPCNTL2(tc_port), val);
}
}
static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder,
int link_clock,
u32 level,
enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
if (intel_phy_is_combo(dev_priv, phy))
icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
else
tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level);
}
static u32 translate_signal_level(int signal_levels)
{
int i;
@@ -2742,7 +2896,10 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dport->base;
int level = intel_ddi_dp_level(intel_dp);
if (INTEL_GEN(dev_priv) >= 11)
if (INTEL_GEN(dev_priv) >= 12)
tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
level, encoder->type);
else if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
level, encoder->type);
else if (IS_CANNONLAKE(dev_priv))
@@ -2989,130 +3146,141 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
}
}
static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
static void
icl_phy_set_clock_gating(struct intel_digital_port *dig_port, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
u32 val;
enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
u32 val, bits;
int ln;
if (tc_port == PORT_TC_NONE)
return;
for (ln = 0; ln < 2; ln++) {
val = I915_READ(MG_DP_MODE(ln, port));
val |= MG_DP_MODE_CFG_TR2PWR_GATING |
MG_DP_MODE_CFG_TRPWR_GATING |
MG_DP_MODE_CFG_CLNPWR_GATING |
MG_DP_MODE_CFG_DIGPWR_GATING |
MG_DP_MODE_CFG_GAONPWR_GATING;
I915_WRITE(MG_DP_MODE(ln, port), val);
}
val = I915_READ(MG_MISC_SUS0(tc_port));
val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
MG_MISC_SUS0_CFG_TR2PWR_GATING |
MG_MISC_SUS0_CFG_CL2PWR_GATING |
MG_MISC_SUS0_CFG_GAONPWR_GATING |
MG_MISC_SUS0_CFG_TRPWR_GATING |
MG_MISC_SUS0_CFG_CL1PWR_GATING |
MG_MISC_SUS0_CFG_DGPWR_GATING;
I915_WRITE(MG_MISC_SUS0(tc_port), val);
}
static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
{
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
u32 val;
int ln;
if (tc_port == PORT_TC_NONE)
return;
bits = MG_DP_MODE_CFG_TR2PWR_GATING | MG_DP_MODE_CFG_TRPWR_GATING |
MG_DP_MODE_CFG_CLNPWR_GATING | MG_DP_MODE_CFG_DIGPWR_GATING |
MG_DP_MODE_CFG_GAONPWR_GATING;
for (ln = 0; ln < 2; ln++) {
val = I915_READ(MG_DP_MODE(ln, port));
val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
MG_DP_MODE_CFG_TRPWR_GATING |
MG_DP_MODE_CFG_CLNPWR_GATING |
MG_DP_MODE_CFG_DIGPWR_GATING |
MG_DP_MODE_CFG_GAONPWR_GATING);
I915_WRITE(MG_DP_MODE(ln, port), val);
if (INTEL_GEN(dev_priv) >= 12) {
I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
val = I915_READ(DKL_DP_MODE(tc_port));
} else {
val = I915_READ(MG_DP_MODE(ln, tc_port));
}
if (enable)
val |= bits;
else
val &= ~bits;
if (INTEL_GEN(dev_priv) >= 12)
I915_WRITE(DKL_DP_MODE(tc_port), val);
else
I915_WRITE(MG_DP_MODE(ln, tc_port), val);
}
val = I915_READ(MG_MISC_SUS0(tc_port));
val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
MG_MISC_SUS0_CFG_TR2PWR_GATING |
MG_MISC_SUS0_CFG_CL2PWR_GATING |
MG_MISC_SUS0_CFG_GAONPWR_GATING |
MG_MISC_SUS0_CFG_TRPWR_GATING |
MG_MISC_SUS0_CFG_CL1PWR_GATING |
MG_MISC_SUS0_CFG_DGPWR_GATING);
I915_WRITE(MG_MISC_SUS0(tc_port), val);
if (INTEL_GEN(dev_priv) == 11) {
bits = MG_MISC_SUS0_CFG_TR2PWR_GATING |
MG_MISC_SUS0_CFG_CL2PWR_GATING |
MG_MISC_SUS0_CFG_GAONPWR_GATING |
MG_MISC_SUS0_CFG_TRPWR_GATING |
MG_MISC_SUS0_CFG_CL1PWR_GATING |
MG_MISC_SUS0_CFG_DGPWR_GATING;
val = I915_READ(MG_MISC_SUS0(tc_port));
if (enable)
val |= (bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3));
else
val &= ~(bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK);
I915_WRITE(MG_MISC_SUS0(tc_port), val);
}
}
static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
static void
icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->base.port;
u32 ln0, ln1, lane_mask;
enum tc_port tc_port = intel_port_to_tc(dev_priv, intel_dig_port->base.port);
u32 ln0, ln1, pin_assignment;
u8 width;
if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
return;
ln0 = I915_READ(MG_DP_MODE(0, port));
ln1 = I915_READ(MG_DP_MODE(1, port));
switch (intel_dig_port->tc_mode) {
case TC_PORT_DP_ALT:
ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
lane_mask = intel_tc_port_get_lane_mask(intel_dig_port);
switch (lane_mask) {
case 0x1:
case 0x4:
break;
case 0x2:
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
break;
case 0x3:
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
MG_DP_MODE_CFG_DP_X2_MODE;
break;
case 0x8:
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
break;
case 0xC:
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
MG_DP_MODE_CFG_DP_X2_MODE;
break;
case 0xF:
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
MG_DP_MODE_CFG_DP_X2_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
MG_DP_MODE_CFG_DP_X2_MODE;
break;
default:
MISSING_CASE(lane_mask);
}
break;
case TC_PORT_LEGACY:
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
break;
default:
MISSING_CASE(intel_dig_port->tc_mode);
return;
if (INTEL_GEN(dev_priv) >= 12) {
I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
ln0 = I915_READ(DKL_DP_MODE(tc_port));
I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
ln1 = I915_READ(DKL_DP_MODE(tc_port));
} else {
ln0 = I915_READ(MG_DP_MODE(0, tc_port));
ln1 = I915_READ(MG_DP_MODE(1, tc_port));
}
I915_WRITE(MG_DP_MODE(0, port), ln0);
I915_WRITE(MG_DP_MODE(1, port), ln1);
ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
/* DPPATC */
pin_assignment = intel_tc_port_get_pin_assignment_mask(intel_dig_port);
width = crtc_state->lane_count;
switch (pin_assignment) {
case 0x0:
WARN_ON(intel_dig_port->tc_mode != TC_PORT_LEGACY);
if (width == 1) {
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
} else {
ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
case 0x1:
if (width == 4) {
ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
case 0x2:
if (width == 2) {
ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
case 0x3:
case 0x5:
if (width == 1) {
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
} else {
ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
case 0x4:
case 0x6:
if (width == 1) {
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
} else {
ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
default:
MISSING_CASE(pin_assignment);
}
if (INTEL_GEN(dev_priv) >= 12) {
I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
I915_WRITE(DKL_DP_MODE(tc_port), ln0);
I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
I915_WRITE(DKL_DP_MODE(tc_port), ln1);
} else {
I915_WRITE(MG_DP_MODE(0, tc_port), ln0);
I915_WRITE(MG_DP_MODE(1, tc_port), ln1);
}
}
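
For pin assignments 0x3-0x6 the switch above encodes a single rule: a one-lane link puts the PHY lanes in x1 mode, anything wider puts them in x2 mode. As a hypothetical helper (an illustration only; the patch keeps this inline in the switch):

static u32 mg_dp_mode_for_width(u8 width)
{
	/* x1 mode for a 1-lane link, x2 mode for 2- or 4-lane links */
	return width == 1 ? MG_DP_MODE_CFG_DP_X1_MODE :
			    MG_DP_MODE_CFG_DP_X2_MODE;
}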
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
@ -3129,17 +3297,18 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
struct intel_dp *intel_dp;
u32 val;
if (!crtc_state->fec_enable)
return;
val = I915_READ(DP_TP_CTL(port));
intel_dp = enc_to_intel_dp(&encoder->base);
val = I915_READ(intel_dp->regs.dp_tp_ctl);
val |= DP_TP_CTL_FEC_ENABLE;
I915_WRITE(DP_TP_CTL(port), val);
I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
DRM_ERROR("Timed out waiting for FEC Enable Status\n");
}
@ -3148,21 +3317,123 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
struct intel_dp *intel_dp;
u32 val;
if (!crtc_state->fec_enable)
return;
val = I915_READ(DP_TP_CTL(port));
intel_dp = enc_to_intel_dp(&encoder->base);
val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_FEC_ENABLE;
I915_WRITE(DP_TP_CTL(port), val);
POSTING_READ(DP_TP_CTL(port));
I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
POSTING_READ(intel_dp->regs.dp_tp_ctl);
}
static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int level = intel_ddi_dp_level(intel_dp);
enum transcoder transcoder = crtc_state->cpu_transcoder;
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder);
intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder);
/* 1.a is handled in intel_atomic_commit_tail() */
/* 2. */
intel_edp_panel_on(intel_dp);
/*
* 1.b, 3. and 4.a are done before tgl_ddi_pre_enable_dp() by:
* haswell_crtc_enable()->intel_encoders_pre_pll_enable() and
* haswell_crtc_enable()->intel_enable_shared_dpll()
*/
/* 4.b */
intel_ddi_clk_select(encoder, crtc_state);
/* 5. */
if (!intel_phy_is_tc(dev_priv, phy) ||
dig_port->tc_mode != TC_PORT_TBT_ALT)
intel_display_power_get(dev_priv,
dig_port->ddi_io_power_domain);
/* 6. */
icl_program_mg_dp_mode(dig_port, crtc_state);
/*
* 7.a - Steps in this function should only be executed for the MST
* master; that is taken care of by the MST hook
* intel_mst_pre_enable_dp()
*/
intel_ddi_enable_pipe_clock(crtc_state);
/* 7.b */
intel_ddi_config_transcoder_func(crtc_state);
/* 7.d */
icl_phy_set_clock_gating(dig_port, false);
/* 7.e */
tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level,
encoder->type);
/* 7.f */
if (intel_phy_is_combo(dev_priv, phy)) {
bool lane_reversal =
dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
intel_combo_phy_power_up_lanes(dev_priv, phy, false,
crtc_state->lane_count,
lane_reversal);
}
/* 7.g */
intel_ddi_init_dp_buf_reg(encoder);
if (!is_mst)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
/*
* DDI FEC: a source that anticipates enabling FEC encoding sets the
* FEC_READY bit in the FEC_CONFIGURATION register to 1 before
* initiating link training
*/
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
/* 7.c, 7.h, 7.i, 7.j */
intel_dp_start_link_train(intel_dp);
/* 7.k */
intel_dp_stop_link_train(intel_dp);
/*
* TODO: enable clock gating
*
* It is not written in the DP enabling sequence, but "PHY Clockgating
* programming" states that clock gating should be enabled after the
* link training. Doing so causes all subsequent link trainings to
* fail, so it is not enabled for now.
*/
/* 7.l */
intel_ddi_enable_fec(encoder, crtc_state);
intel_dsc_enable(encoder, crtc_state);
}
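
The numbered comments in tgl_ddi_pre_enable_dp() track the bspec DP enabling sequence; condensed (a summary inferred from the comments above, not quoted from bspec):

/*
 * 1-2:   panel power on (1.a/1.b handled by atomic commit and the
 *        pre-PLL hooks)
 * 3-4:   shared DPLL enable and DDI clock select
 * 5:     DDI IO power (skipped in TBT-alt mode)
 * 6:     MG/DKL DP mode programming
 * 7.a-b: pipe clock (MST master only) and transcoder setup
 * 7.d-f: PHY clock gating off, vswing, combo PHY lane power-up
 * 7.g-k: DDI buffer setup and link training
 * 7.l:   FEC enable, followed by DSC
 */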
static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@ -3177,6 +3448,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
intel_edp_panel_on(intel_dp);
intel_ddi_clk_select(encoder, crtc_state);
@ -3186,8 +3460,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_display_power_get(dev_priv,
dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
icl_program_mg_dp_mode(dig_port, crtc_state);
icl_phy_set_clock_gating(dig_port, false);
if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
@ -3220,7 +3494,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_enable_fec(encoder, crtc_state);
icl_enable_phy_clock_gating(dig_port);
icl_phy_set_clock_gating(dig_port, true);
if (!is_mst)
intel_ddi_enable_pipe_clock(crtc_state);
@ -3228,6 +3502,18 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_dsc_enable(encoder, crtc_state);
}
static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (INTEL_GEN(dev_priv) >= 12)
tgl_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
else
hsw_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
}
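
Both pre-enable paths cache the DP_TP_* registers on the encoder before touching them; only the register picked differs. A sketch of the selection rule folded into one hypothetical helper (the patch open-codes this in the tgl/hsw paths and in intel_dp_prepare()):

static void cache_dp_tp_regs(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (INTEL_GEN(i915) >= 12) {
		/* gen12 moved DP_TP_* into the transcoder register space */
		intel_dp->regs.dp_tp_ctl =
			TGL_DP_TP_CTL(crtc_state->cpu_transcoder);
		intel_dp->regs.dp_tp_status =
			TGL_DP_TP_STATUS(crtc_state->cpu_transcoder);
	} else {
		/* up to gen11 they live in the per-port register space */
		enum port port = dp_to_dig_port(intel_dp)->base.port;

		intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
		intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
	}
}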
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@ -3244,10 +3530,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
icl_program_mg_dp_mode(dig_port, crtc_state);
icl_phy_set_clock_gating(dig_port, false);
if (INTEL_GEN(dev_priv) >= 11)
if (INTEL_GEN(dev_priv) >= 12)
tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, INTEL_OUTPUT_HDMI);
else if (INTEL_GEN(dev_priv) == 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, INTEL_OUTPUT_HDMI);
else if (IS_CANNONLAKE(dev_priv))
@ -3257,7 +3546,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
else
intel_prepare_hdmi_ddi_buffers(encoder, level);
icl_enable_phy_clock_gating(dig_port);
icl_phy_set_clock_gating(dig_port, true);
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
@ -3330,10 +3619,14 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
wait = true;
}
val = I915_READ(DP_TP_CTL(port));
val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
val |= DP_TP_CTL_LINK_TRAIN_PAT1;
I915_WRITE(DP_TP_CTL(port), val);
if (intel_crtc_has_dp_encoder(crtc_state)) {
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
val |= DP_TP_CTL_LINK_TRAIN_PAT1;
I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
}
/* Disable FEC in DP Sink */
intel_ddi_disable_fec_state(encoder, crtc_state);
@ -3761,7 +4054,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
u32 val;
bool wait = false;
if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
if (I915_READ(intel_dp->regs.dp_tp_ctl) & DP_TP_CTL_ENABLE) {
val = I915_READ(DDI_BUF_CTL(port));
if (val & DDI_BUF_CTL_ENABLE) {
val &= ~DDI_BUF_CTL_ENABLE;
@ -3769,11 +4062,11 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
wait = true;
}
val = I915_READ(DP_TP_CTL(port));
val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
val |= DP_TP_CTL_LINK_TRAIN_PAT1;
I915_WRITE(DP_TP_CTL(port), val);
POSTING_READ(DP_TP_CTL(port));
I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
POSTING_READ(intel_dp->regs.dp_tp_ctl);
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
@ -3788,8 +4081,8 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
I915_WRITE(DP_TP_CTL(port), val);
POSTING_READ(DP_TP_CTL(port));
I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
POSTING_READ(intel_dp->regs.dp_tp_ctl);
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
@ -3891,6 +4184,23 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(intel_crtc, pipe_config);
if (INTEL_GEN(dev_priv) >= 11) {
i915_reg_t dp_tp_ctl;
if (IS_GEN(dev_priv, 11))
dp_tp_ctl = DP_TP_CTL(encoder->port);
else
dp_tp_ctl = TGL_DP_TP_CTL(pipe_config->cpu_transcoder);
pipe_config->fec_enable =
I915_READ(dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n",
encoder->base.base.id, encoder->base.name,
pipe_config->fec_enable);
}
break;
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);

@ -1,5 +1,5 @@
/*
* Copyright © 2006-2017 Intel Corporation
* Copyright © 2006-2019 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -32,8 +32,10 @@ enum link_m_n_set;
struct dpll;
struct drm_connector;
struct drm_device;
struct drm_display_mode;
struct drm_encoder;
struct drm_file;
struct drm_format_info;
struct drm_framebuffer;
struct drm_i915_error_state_buf;
struct drm_i915_gem_object;
@ -182,6 +184,24 @@ enum plane_id {
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
for_each_if((__crtc)->plane_ids_mask & BIT(__p))
enum port {
PORT_NONE = -1,
PORT_A = 0,
PORT_B,
PORT_C,
PORT_D,
PORT_E,
PORT_F,
PORT_G,
PORT_H,
PORT_I,
I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')
/*
* Ports identifier referenced from other drivers.
* Expected to remain stable over time
@ -289,10 +309,10 @@ enum phy_fia {
};
#define for_each_pipe(__dev_priv, __p) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++)
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) \
for_each_if((__mask) & BIT(__p))
#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
@ -411,6 +431,15 @@ enum phy_fia {
(__i)++) \
for_each_if(crtc)
#define for_each_oldnew_intel_crtc_in_state_reverse(__state, crtc, old_crtc_state, new_crtc_state, __i) \
for ((__i) = (__state)->base.dev->mode_config.num_crtc - 1; \
(__i) >= 0 && \
((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
(old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
(new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
(__i)--) \
for_each_if(crtc)
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
@ -420,6 +449,9 @@ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
bool intel_plane_can_remap(const struct intel_plane_state *plane_state);
enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
const struct drm_display_mode *mode);
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
void intel_plane_destroy(struct drm_plane *plane);
@ -521,7 +553,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(const struct intel_crtc_state *crtc_state,
u32 pixel_format);
const struct drm_format_info *format);
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
@ -544,13 +576,10 @@ void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_display_error_state *error);
/* modesetting */
void intel_modeset_init_hw(struct drm_device *dev);
int intel_modeset_init(struct drm_device *dev);
void intel_modeset_driver_remove(struct drm_device *dev);
int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state);
void intel_modeset_init_hw(struct drm_i915_private *i915);
int intel_modeset_init(struct drm_i915_private *i915);
void intel_modeset_driver_remove(struct drm_i915_private *i915);
void intel_display_resume(struct drm_device *dev);
void i915_redisable_vga(struct drm_i915_private *dev_priv);
void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
/* modesetting asserts */


@ -3,8 +3,6 @@
* Copyright © 2019 Intel Corporation
*/
#include <linux/vgaarb.h>
#include "display/intel_crt.h"
#include "display/intel_dp.h"
@ -19,16 +17,14 @@
#include "intel_hotplug.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
const char *
intel_display_power_domain_str(struct drm_i915_private *i915,
enum intel_display_power_domain domain)
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
bool ddi_tc_ports = IS_GEN(i915, 12);
switch (domain) {
case POWER_DOMAIN_DISPLAY_CORE:
return "DISPLAY_CORE";
@ -71,23 +67,17 @@ intel_display_power_domain_str(struct drm_i915_private *i915,
case POWER_DOMAIN_PORT_DDI_C_LANES:
return "PORT_DDI_C_LANES";
case POWER_DOMAIN_PORT_DDI_D_LANES:
BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_LANES !=
POWER_DOMAIN_PORT_DDI_TC1_LANES);
return ddi_tc_ports ? "PORT_DDI_TC1_LANES" : "PORT_DDI_D_LANES";
return "PORT_DDI_D_LANES";
case POWER_DOMAIN_PORT_DDI_E_LANES:
BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_LANES !=
POWER_DOMAIN_PORT_DDI_TC2_LANES);
return ddi_tc_ports ? "PORT_DDI_TC2_LANES" : "PORT_DDI_E_LANES";
return "PORT_DDI_E_LANES";
case POWER_DOMAIN_PORT_DDI_F_LANES:
BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_LANES !=
POWER_DOMAIN_PORT_DDI_TC3_LANES);
return ddi_tc_ports ? "PORT_DDI_TC3_LANES" : "PORT_DDI_F_LANES";
case POWER_DOMAIN_PORT_DDI_TC4_LANES:
return "PORT_DDI_TC4_LANES";
case POWER_DOMAIN_PORT_DDI_TC5_LANES:
return "PORT_DDI_TC5_LANES";
case POWER_DOMAIN_PORT_DDI_TC6_LANES:
return "PORT_DDI_TC6_LANES";
return "PORT_DDI_F_LANES";
case POWER_DOMAIN_PORT_DDI_G_LANES:
return "PORT_DDI_G_LANES";
case POWER_DOMAIN_PORT_DDI_H_LANES:
return "PORT_DDI_H_LANES";
case POWER_DOMAIN_PORT_DDI_I_LANES:
return "PORT_DDI_I_LANES";
case POWER_DOMAIN_PORT_DDI_A_IO:
return "PORT_DDI_A_IO";
case POWER_DOMAIN_PORT_DDI_B_IO:
@ -95,23 +85,17 @@ intel_display_power_domain_str(struct drm_i915_private *i915,
case POWER_DOMAIN_PORT_DDI_C_IO:
return "PORT_DDI_C_IO";
case POWER_DOMAIN_PORT_DDI_D_IO:
BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_IO !=
POWER_DOMAIN_PORT_DDI_TC1_IO);
return ddi_tc_ports ? "PORT_DDI_TC1_IO" : "PORT_DDI_D_IO";
return "PORT_DDI_D_IO";
case POWER_DOMAIN_PORT_DDI_E_IO:
BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_IO !=
POWER_DOMAIN_PORT_DDI_TC2_IO);
return ddi_tc_ports ? "PORT_DDI_TC2_IO" : "PORT_DDI_E_IO";
return "PORT_DDI_E_IO";
case POWER_DOMAIN_PORT_DDI_F_IO:
BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_IO !=
POWER_DOMAIN_PORT_DDI_TC3_IO);
return ddi_tc_ports ? "PORT_DDI_TC3_IO" : "PORT_DDI_F_IO";
case POWER_DOMAIN_PORT_DDI_TC4_IO:
return "PORT_DDI_TC4_IO";
case POWER_DOMAIN_PORT_DDI_TC5_IO:
return "PORT_DDI_TC5_IO";
case POWER_DOMAIN_PORT_DDI_TC6_IO:
return "PORT_DDI_TC6_IO";
return "PORT_DDI_F_IO";
case POWER_DOMAIN_PORT_DDI_G_IO:
return "PORT_DDI_G_IO";
case POWER_DOMAIN_PORT_DDI_H_IO:
return "PORT_DDI_H_IO";
case POWER_DOMAIN_PORT_DDI_I_IO:
return "PORT_DDI_I_IO";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
@ -129,34 +113,33 @@ intel_display_power_domain_str(struct drm_i915_private *i915,
case POWER_DOMAIN_AUX_C:
return "AUX_C";
case POWER_DOMAIN_AUX_D:
BUILD_BUG_ON(POWER_DOMAIN_AUX_D != POWER_DOMAIN_AUX_TC1);
return ddi_tc_ports ? "AUX_TC1" : "AUX_D";
return "AUX_D";
case POWER_DOMAIN_AUX_E:
BUILD_BUG_ON(POWER_DOMAIN_AUX_E != POWER_DOMAIN_AUX_TC2);
return ddi_tc_ports ? "AUX_TC2" : "AUX_E";
return "AUX_E";
case POWER_DOMAIN_AUX_F:
BUILD_BUG_ON(POWER_DOMAIN_AUX_F != POWER_DOMAIN_AUX_TC3);
return ddi_tc_ports ? "AUX_TC3" : "AUX_F";
case POWER_DOMAIN_AUX_TC4:
return "AUX_TC4";
case POWER_DOMAIN_AUX_TC5:
return "AUX_TC5";
case POWER_DOMAIN_AUX_TC6:
return "AUX_TC6";
return "AUX_F";
case POWER_DOMAIN_AUX_G:
return "AUX_G";
case POWER_DOMAIN_AUX_H:
return "AUX_H";
case POWER_DOMAIN_AUX_I:
return "AUX_I";
case POWER_DOMAIN_AUX_IO_A:
return "AUX_IO_A";
case POWER_DOMAIN_AUX_TBT1:
return "AUX_TBT1";
case POWER_DOMAIN_AUX_TBT2:
return "AUX_TBT2";
case POWER_DOMAIN_AUX_TBT3:
return "AUX_TBT3";
case POWER_DOMAIN_AUX_TBT4:
return "AUX_TBT4";
case POWER_DOMAIN_AUX_TBT5:
return "AUX_TBT5";
case POWER_DOMAIN_AUX_TBT6:
return "AUX_TBT6";
case POWER_DOMAIN_AUX_C_TBT:
return "AUX_C_TBT";
case POWER_DOMAIN_AUX_D_TBT:
return "AUX_D_TBT";
case POWER_DOMAIN_AUX_E_TBT:
return "AUX_E_TBT";
case POWER_DOMAIN_AUX_F_TBT:
return "AUX_F_TBT";
case POWER_DOMAIN_AUX_G_TBT:
return "AUX_G_TBT";
case POWER_DOMAIN_AUX_H_TBT:
return "AUX_H_TBT";
case POWER_DOMAIN_AUX_I_TBT:
return "AUX_I_TBT";
case POWER_DOMAIN_GMBUS:
return "GMBUS";
case POWER_DOMAIN_INIT:
@ -283,23 +266,8 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 irq_pipe_mask, bool has_vga)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
* we'll get unclaimed register interrupts. This stops after we write
* anything to the VGA MSR register. The vgacon module uses this
* register all the time, so if we unbind our driver and, as a
* consequence, bind vgacon, we'll get stuck in an infinite loop at
* console_unlock(). So make sure we touch the VGA MSR register here, making
* sure vgacon can keep working normally without triggering interrupts
* and error messages.
*/
if (has_vga) {
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(pdev, VGA_RSRC_LEGACY_IO);
}
if (has_vga)
intel_vga_reset_io_mem(dev_priv);
if (irq_pipe_mask)
gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
@ -578,6 +546,8 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
#endif
#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
@ -594,6 +564,17 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
hsw_power_well_enable(dev_priv, power_well);
if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
enum tc_port tc_port;
tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
DKL_CMN_UC_DW27_UC_HEALTH, 1))
DRM_WARN("Timeout waiting TC uC health\n");
}
}
static void
@ -1208,7 +1189,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_crt_reset(&encoder->base);
}
i915_redisable_vga_power_on(dev_priv);
intel_vga_redisable_power_on(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
}
@ -1718,15 +1699,12 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
static void print_power_domains(struct i915_power_domains *power_domains,
const char *prefix, u64 mask)
{
struct drm_i915_private *i915 =
container_of(power_domains, struct drm_i915_private,
power_domains);
enum intel_display_power_domain domain;
DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
for_each_power_domain(domain, mask)
DRM_DEBUG_DRIVER("%s use_count %d\n",
intel_display_power_domain_str(i915, domain),
intel_display_power_domain_str(domain),
power_domains->domain_use_count[domain]);
}
@ -1896,7 +1874,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
const char *name = intel_display_power_domain_str(dev_priv, domain);
const char *name = intel_display_power_domain_str(domain);
power_domains = &dev_priv->power_domains;
@ -2487,10 +2465,10 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_D) | \
BIT_ULL(POWER_DOMAIN_AUX_E) | \
BIT_ULL(POWER_DOMAIN_AUX_F) | \
BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \
BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_INIT))
@ -2530,22 +2508,22 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_B))
#define ICL_AUX_C_IO_POWER_DOMAINS ( \
#define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_C))
#define ICL_AUX_D_IO_POWER_DOMAINS ( \
#define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_D))
#define ICL_AUX_E_IO_POWER_DOMAINS ( \
#define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_E))
#define ICL_AUX_F_IO_POWER_DOMAINS ( \
#define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_F))
#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_TBT1))
#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_TBT2))
#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_TBT3))
#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_TBT4))
#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
#define TGL_PW_5_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PIPE_D) | \
@ -2565,24 +2543,24 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_TC1) | \
BIT_ULL(POWER_DOMAIN_AUX_TC2) | \
BIT_ULL(POWER_DOMAIN_AUX_TC3) | \
BIT_ULL(POWER_DOMAIN_AUX_TC4) | \
BIT_ULL(POWER_DOMAIN_AUX_TC5) | \
BIT_ULL(POWER_DOMAIN_AUX_TC6) | \
BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_D) | \
BIT_ULL(POWER_DOMAIN_AUX_E) | \
BIT_ULL(POWER_DOMAIN_AUX_F) | \
BIT_ULL(POWER_DOMAIN_AUX_G) | \
BIT_ULL(POWER_DOMAIN_AUX_H) | \
BIT_ULL(POWER_DOMAIN_AUX_I) | \
BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_INIT))
@ -2598,35 +2576,50 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define TGL_DDI_IO_TC1_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO))
#define TGL_DDI_IO_TC2_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO))
#define TGL_DDI_IO_TC3_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO))
#define TGL_DDI_IO_TC4_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO))
#define TGL_DDI_IO_TC5_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO))
#define TGL_DDI_IO_TC6_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO))
#define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
#define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
#define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
#define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
#define TGL_AUX_TC1_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_TC1))
#define TGL_AUX_TC2_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_TC2))
#define TGL_AUX_TC3_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_TC3))
#define TGL_AUX_TC4_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_TC4))
#define TGL_AUX_TC5_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_TC5))
#define TGL_AUX_TC6_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_TC6))
#define TGL_AUX_TBT5_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_TBT5))
#define TGL_AUX_TBT6_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_TBT6))
#define TGL_AUX_A_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
BIT_ULL(POWER_DOMAIN_AUX_A))
#define TGL_AUX_B_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_B))
#define TGL_AUX_C_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_C))
#define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_D))
#define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_E))
#define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_F))
#define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_G))
#define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_H))
#define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_I))
#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
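
A note on the naming scheme the macros above introduce (summarized from this hunk, no new behavior):

/*
 * TC/TBT wells and domains are keyed by the fixed DDI letter, with the
 * TC index kept only in the well name: e.g. TGL's first Type-C AUX
 * well is "AUX D TC1" built on POWER_DOMAIN_AUX_D, replacing the old
 * POWER_DOMAIN_AUX_TC1 alias.
 */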
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
@ -3484,8 +3477,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
.name = "AUX C",
.domains = ICL_AUX_C_IO_POWER_DOMAINS,
.name = "AUX C TC1",
.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3495,8 +3488,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
.name = "AUX D",
.domains = ICL_AUX_D_IO_POWER_DOMAINS,
.name = "AUX D TC2",
.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3506,8 +3499,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
.name = "AUX E",
.domains = ICL_AUX_E_IO_POWER_DOMAINS,
.name = "AUX E TC3",
.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3517,8 +3510,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
.name = "AUX F",
.domains = ICL_AUX_F_IO_POWER_DOMAINS,
.name = "AUX F TC4",
.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3528,8 +3521,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
.name = "AUX TBT1",
.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
.name = "AUX C TBT1",
.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3539,8 +3532,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
.name = "AUX TBT2",
.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
.name = "AUX D TBT2",
.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3550,8 +3543,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
.name = "AUX TBT3",
.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
.name = "AUX E TBT3",
.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3561,8 +3554,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
.name = "AUX TBT4",
.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
.name = "AUX F TBT4",
.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3667,8 +3660,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
}
},
{
.name = "DDI TC1 IO",
.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
.name = "DDI D TC1 IO",
.domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3677,8 +3670,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
.name = "DDI TC2 IO",
.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
.name = "DDI E TC2 IO",
.domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3687,8 +3680,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
.name = "DDI TC3 IO",
.domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
.name = "DDI F TC3 IO",
.domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3697,8 +3690,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
.name = "DDI TC4 IO",
.domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
.name = "DDI G TC4 IO",
.domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3707,8 +3700,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
.name = "DDI TC5 IO",
.domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
.name = "DDI H TC5 IO",
.domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3717,8 +3710,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
.name = "DDI TC6 IO",
.domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
.name = "DDI I TC6 IO",
.domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3728,7 +3721,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
{
.name = "AUX A",
.domains = ICL_AUX_A_IO_POWER_DOMAINS,
.domains = TGL_AUX_A_IO_POWER_DOMAINS,
.ops = &icl_combo_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3738,7 +3731,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
{
.name = "AUX B",
.domains = ICL_AUX_B_IO_POWER_DOMAINS,
.domains = TGL_AUX_B_IO_POWER_DOMAINS,
.ops = &icl_combo_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3748,7 +3741,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
{
.name = "AUX C",
.domains = ICL_AUX_C_IO_POWER_DOMAINS,
.domains = TGL_AUX_C_IO_POWER_DOMAINS,
.ops = &icl_combo_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3757,8 +3750,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
.name = "AUX TC1",
.domains = TGL_AUX_TC1_IO_POWER_DOMAINS,
.name = "AUX D TC1",
.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3768,8 +3761,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
.name = "AUX TC2",
.domains = TGL_AUX_TC2_IO_POWER_DOMAINS,
.name = "AUX E TC2",
.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3779,8 +3772,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
.name = "AUX TC3",
.domains = TGL_AUX_TC3_IO_POWER_DOMAINS,
.name = "AUX F TC3",
.domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3790,8 +3783,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
.name = "AUX TC4",
.domains = TGL_AUX_TC4_IO_POWER_DOMAINS,
.name = "AUX G TC4",
.domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3801,8 +3794,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
.name = "AUX TC5",
.domains = TGL_AUX_TC5_IO_POWER_DOMAINS,
.name = "AUX H TC5",
.domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3812,8 +3805,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
.name = "AUX TC6",
.domains = TGL_AUX_TC6_IO_POWER_DOMAINS,
.name = "AUX I TC6",
.domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3823,8 +3816,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
.name = "AUX TBT1",
.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
.name = "AUX D TBT1",
.domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3834,8 +3827,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
.name = "AUX TBT2",
.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
.name = "AUX E TBT2",
.domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3845,8 +3838,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
.name = "AUX TBT3",
.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
.name = "AUX F TBT3",
.domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3856,8 +3849,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
.name = "AUX TBT4",
.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
.name = "AUX G TBT4",
.domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3867,8 +3860,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
.name = "AUX TBT5",
.domains = TGL_AUX_TBT5_IO_POWER_DOMAINS,
.name = "AUX H TBT5",
.domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -3878,8 +3871,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
.name = "AUX TBT6",
.domains = TGL_AUX_TBT6_IO_POWER_DOMAINS,
.name = "AUX I TBT6",
.domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@ -5104,8 +5097,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915)
for_each_power_domain(domain, power_well->desc->domains)
DRM_DEBUG_DRIVER(" %-23s %d\n",
intel_display_power_domain_str(i915,
domain),
intel_display_power_domain_str(domain),
power_domains->domain_use_count[domain]);
}
}


@ -36,29 +36,20 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_DDI_B_LANES,
POWER_DOMAIN_PORT_DDI_C_LANES,
POWER_DOMAIN_PORT_DDI_D_LANES,
POWER_DOMAIN_PORT_DDI_TC1_LANES = POWER_DOMAIN_PORT_DDI_D_LANES,
POWER_DOMAIN_PORT_DDI_E_LANES,
POWER_DOMAIN_PORT_DDI_TC2_LANES = POWER_DOMAIN_PORT_DDI_E_LANES,
POWER_DOMAIN_PORT_DDI_F_LANES,
POWER_DOMAIN_PORT_DDI_TC3_LANES = POWER_DOMAIN_PORT_DDI_F_LANES,
POWER_DOMAIN_PORT_DDI_TC4_LANES,
POWER_DOMAIN_PORT_DDI_TC5_LANES,
POWER_DOMAIN_PORT_DDI_TC6_LANES,
POWER_DOMAIN_PORT_DDI_G_LANES,
POWER_DOMAIN_PORT_DDI_H_LANES,
POWER_DOMAIN_PORT_DDI_I_LANES,
POWER_DOMAIN_PORT_DDI_A_IO,
POWER_DOMAIN_PORT_DDI_B_IO,
POWER_DOMAIN_PORT_DDI_C_IO,
POWER_DOMAIN_PORT_DDI_D_IO,
POWER_DOMAIN_PORT_DDI_TC1_IO = POWER_DOMAIN_PORT_DDI_D_IO,
POWER_DOMAIN_PORT_DDI_E_IO,
POWER_DOMAIN_PORT_DDI_TC2_IO = POWER_DOMAIN_PORT_DDI_E_IO,
POWER_DOMAIN_PORT_DDI_F_IO,
POWER_DOMAIN_PORT_DDI_TC3_IO = POWER_DOMAIN_PORT_DDI_F_IO,
POWER_DOMAIN_PORT_DDI_G_IO,
POWER_DOMAIN_PORT_DDI_TC4_IO = POWER_DOMAIN_PORT_DDI_G_IO,
POWER_DOMAIN_PORT_DDI_H_IO,
POWER_DOMAIN_PORT_DDI_TC5_IO = POWER_DOMAIN_PORT_DDI_H_IO,
POWER_DOMAIN_PORT_DDI_I_IO,
POWER_DOMAIN_PORT_DDI_TC6_IO = POWER_DOMAIN_PORT_DDI_I_IO,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
@ -68,21 +59,19 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_B,
POWER_DOMAIN_AUX_C,
POWER_DOMAIN_AUX_D,
POWER_DOMAIN_AUX_TC1 = POWER_DOMAIN_AUX_D,
POWER_DOMAIN_AUX_E,
POWER_DOMAIN_AUX_TC2 = POWER_DOMAIN_AUX_E,
POWER_DOMAIN_AUX_F,
POWER_DOMAIN_AUX_TC3 = POWER_DOMAIN_AUX_F,
POWER_DOMAIN_AUX_TC4,
POWER_DOMAIN_AUX_TC5,
POWER_DOMAIN_AUX_TC6,
POWER_DOMAIN_AUX_G,
POWER_DOMAIN_AUX_H,
POWER_DOMAIN_AUX_I,
POWER_DOMAIN_AUX_IO_A,
POWER_DOMAIN_AUX_TBT1,
POWER_DOMAIN_AUX_TBT2,
POWER_DOMAIN_AUX_TBT3,
POWER_DOMAIN_AUX_TBT4,
POWER_DOMAIN_AUX_TBT5,
POWER_DOMAIN_AUX_TBT6,
POWER_DOMAIN_AUX_C_TBT,
POWER_DOMAIN_AUX_D_TBT,
POWER_DOMAIN_AUX_E_TBT,
POWER_DOMAIN_AUX_F_TBT,
POWER_DOMAIN_AUX_G_TBT,
POWER_DOMAIN_AUX_H_TBT,
POWER_DOMAIN_AUX_I_TBT,
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
@ -269,8 +258,7 @@ void intel_display_power_suspend(struct drm_i915_private *i915);
void intel_display_power_resume(struct drm_i915_private *i915);
const char *
intel_display_power_domain_str(struct drm_i915_private *i915,
enum intel_display_power_domain domain);
intel_display_power_domain_str(enum intel_display_power_domain domain);
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);


@ -388,6 +388,13 @@ struct intel_hdcp {
wait_queue_head_t cp_irq_queue;
atomic_t cp_irq_count;
int cp_irq_count_cached;
/*
* HDCP register access for gen12+ needs the associated transcoder.
* The transcoder attached to the connector can change at modeset,
* hence the transcoder is cached here.
*/
enum transcoder cpu_transcoder;
};
struct intel_connector {
@ -481,9 +488,9 @@ struct intel_atomic_state {
* but the converse is not necessarily true; simply changing a mode may
* not flip the final active status of any CRTCs
*/
unsigned int active_pipe_changes;
u8 active_pipe_changes;
unsigned int active_crtcs;
u8 active_pipes;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
/* minimum acceptable voltage level for each pipe */
@ -552,24 +559,24 @@ struct intel_plane_state {
int scaler_id;
/*
* linked_plane:
* planar_linked_plane:
*
* ICL planar formats require 2 planes that are updated in pairs.
* This member is used to make sure the other plane is also updated
* when required, and for update_slave() to find the correct
* plane_state to pass as argument.
*/
struct intel_plane *linked_plane;
struct intel_plane *planar_linked_plane;
/*
* slave:
* planar_slave:
* If set, this plane is not updated directly; the linked plane's state
* is used to update it during atomic commit via the update_slave()
* callback.
*
* It's also used by the watermark code to ignore wm calculations on
* this plane. They're calculated by the linked plane's wm code.
*/
u32 slave;
u32 planar_slave;
struct drm_intel_sprite_colorkey ckey;
};
@ -759,7 +766,6 @@ struct intel_crtc_state {
bool update_pipe; /* can a fast modeset be performed? */
bool disable_cxsr;
bool update_wm_pre, update_wm_post; /* watermarks are updated */
bool fb_changed; /* fb on any of the planes is changed */
bool fifo_changed; /* FIFO split is changed */
/* Pipe source size (ie. panel fitter input size)
@ -1026,6 +1032,9 @@ struct intel_crtc {
/* scalers available on this crtc */
int num_scalers;
/* per pipe DSB related info */
struct intel_dsb dsb;
};
struct intel_plane {
@ -1176,6 +1185,7 @@ struct intel_dp {
/* sink or branch descriptor */
struct drm_dp_desc desc;
struct drm_dp_aux aux;
u32 aux_busy_last_status;
u8 train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
@ -1211,6 +1221,15 @@ struct intel_dp {
bool can_mst; /* this port supports mst */
bool is_mst;
int active_mst_links;
/*
* DP_TP_* registers may live in either the port or the transcoder register space.
*/
struct {
i915_reg_t dp_tp_ctl;
i915_reg_t dp_tp_status;
} regs;
/* connector directly attached - won't be used for modeset in mst world */
struct intel_connector *attached_connector;
@ -1269,6 +1288,7 @@ struct intel_digital_port {
char tc_port_name[8];
enum tc_port_mode tc_mode;
enum phy_fia tc_phy_fia;
u8 tc_phy_fia_idx;
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@ -1509,7 +1529,7 @@ intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
drm_wait_one_vblank(&dev_priv->drm, pipe);
}
static inline void
intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe)
{
const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);


@ -68,11 +68,6 @@
#define DP_DPRX_ESI_LEN 14
/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440
#define DP_DSC_MIN_SUPPORTED_BPC 8
#define DP_DSC_MAX_SUPPORTED_BPC 10
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
@ -500,7 +495,17 @@ u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
DP_DSC_FEC_OVERHEAD_FACTOR);
}
static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
if (INTEL_GEN(i915) >= 11)
return 7680 * 8;
else
return 6144 * 8;
}
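
A worked example of the limit this helper feeds (numbers taken from the code above):

/*
 * gen11+: 7680 * 8 = 61440 bits of small joiner RAM, so a mode with
 * hdisplay = 3840 is capped at 61440 / 3840 = 16 bpp of compressed
 * output; pre-gen11: 6144 * 8 = 49152 bits, capping the same mode at
 * 49152 / 3840 = 12 bpp (integer division).
 */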
static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
u32 link_clock, u32 lane_count,
u32 mode_clock, u32 mode_hdisplay)
{
u32 bits_per_pixel, max_bpp_small_joiner_ram;
@ -517,7 +522,8 @@ static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay;
max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
mode_hdisplay;
DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
/*
@ -634,7 +640,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
true);
} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
dsc_max_output_bpp =
intel_dp_dsc_get_output_bpp(max_link_clock,
intel_dp_dsc_get_output_bpp(dev_priv,
max_link_clock,
max_lanes,
target_clock,
mode->hdisplay) >> 4;
@ -655,7 +662,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
return MODE_OK;
return intel_mode_valid_max_plane_size(dev_priv, mode);
}
u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
@ -732,12 +739,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
u32 DP;
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
"skipping pipe %c power sequencer kick due to port %c being active\n",
pipe_name(pipe), port_name(intel_dig_port->base.port)))
"skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
pipe_name(pipe), intel_dig_port->base.base.base.id,
intel_dig_port->base.base.name))
return;
DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
pipe_name(pipe), port_name(intel_dig_port->base.port));
DRM_DEBUG_KMS("kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
pipe_name(pipe), intel_dig_port->base.base.base.id,
intel_dig_port->base.base.name);
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
@ -855,9 +864,10 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
vlv_steal_power_sequencer(dev_priv, pipe);
intel_dp->pps_pipe = pipe;
DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
DRM_DEBUG_KMS("picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
pipe_name(intel_dp->pps_pipe),
port_name(intel_dig_port->base.port));
intel_dig_port->base.base.base.id,
intel_dig_port->base.base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@ -965,13 +975,16 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
if (intel_dp->pps_pipe == INVALID_PIPE) {
DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
port_name(port));
DRM_DEBUG_KMS("no initial power sequencer for [ENCODER:%d:%s]\n",
intel_dig_port->base.base.base.id,
intel_dig_port->base.base.name);
return;
}
DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
port_name(port), pipe_name(intel_dp->pps_pipe));
DRM_DEBUG_KMS("initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
intel_dig_port->base.base.base.id,
intel_dig_port->base.base.name,
pipe_name(intel_dp->pps_pipe));
intel_dp_init_panel_power_sequencer(intel_dp);
intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
@ -1334,13 +1347,12 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (try == 3) {
static u32 last_status = -1;
const u32 status = intel_uncore_read(uncore, ch_ctl);
if (status != last_status) {
if (status != intel_dp->aux_busy_last_status) {
WARN(1, "dp_aux_ch not started status 0x%08x\n",
status);
last_status = status;
intel_dp->aux_busy_last_status = status;
}
ret = -EBUSY;
@ -1830,8 +1842,14 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
return INTEL_GEN(dev_priv) >= 11 &&
pipe_config->cpu_transcoder != TRANSCODER_A;
/* On TGL, FEC is supported on all Pipes */
if (INTEL_GEN(dev_priv) >= 12)
return true;
if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
return true;
return false;
}
static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
@ -1846,8 +1864,15 @@ static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
return INTEL_GEN(dev_priv) >= 10 &&
pipe_config->cpu_transcoder != TRANSCODER_A;
/* On TGL, DSC is supported on all Pipes */
if (INTEL_GEN(dev_priv) >= 12)
return true;
if (INTEL_GEN(dev_priv) >= 10 &&
pipe_config->cpu_transcoder != TRANSCODER_A)
return true;
return false;
}
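
Taken together, the FEC and DSC source checks above give this support matrix (an inference from the code, not a bspec citation):

/*
 * FEC: gen12+ on any transcoder; gen11 on all transcoders but A;
 *      otherwise unsupported.
 * DSC: gen12+ on any transcoder; gen10/gen11 on all transcoders but A;
 *      otherwise unsupported.
 */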
static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
@ -2006,11 +2031,17 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
if (!intel_dp_supports_dsc(intel_dp, pipe_config))
return -EINVAL;
dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
conn_state->max_requested_bpc);
/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
if (INTEL_GEN(dev_priv) >= 12)
dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
else
dsc_max_bpc = min_t(u8, 10,
conn_state->max_requested_bpc);
pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
/* Min Input BPC for ICL+ is 8 */
if (pipe_bpp < 8 * 3) {
DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
return -EINVAL;
}
@ -2036,7 +2067,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
u8 dsc_dp_slice_count;
dsc_max_output_bpp =
intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
intel_dp_dsc_get_output_bpp(dev_priv,
pipe_config->port_clock,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
adjusted_mode->crtc_hdisplay);
@ -2218,6 +2250,16 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
/*
* Our YCbCr output is always limited range.
* crtc_state->limited_color_range only applies to RGB,
* and it must never be set for YCbCr or we risk setting
* some conflicting bits in PIPECONF which will mess up
* the colors on the monitor.
*/
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
return false;
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
/*
* See:
@ -2335,6 +2377,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_psr_compute_config(intel_dp, pipe_config);
intel_hdcp_transcoder_config(intel_connector,
pipe_config->cpu_transcoder);
return 0;
}
@ -2362,6 +2407,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_crtc_has_type(pipe_config,
INTEL_OUTPUT_DP_MST));
intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
/*
* There are four kinds of DP registers:
*
@ -2563,8 +2611,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
intel_display_power_get(dev_priv,
intel_aux_power_domain(intel_dig_port));
DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
port_name(intel_dig_port->base.port));
DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD on\n",
intel_dig_port->base.base.base.id,
intel_dig_port->base.base.name);
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@ -2583,8 +2632,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
* If the panel wasn't on, delay before accessing aux channel
*/
if (!edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
port_name(intel_dig_port->base.port));
DRM_DEBUG_KMS("[ENCODER:%d:%s] panel power wasn't enabled\n",
intel_dig_port->base.base.base.id,
intel_dig_port->base.base.name);
msleep(intel_dp->panel_power_up_delay);
}
@ -2609,8 +2659,9 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
vdd = false;
with_pps_lock(intel_dp, wakeref)
vdd = edp_panel_vdd_on(intel_dp);
I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
port_name(dp_to_dig_port(intel_dp)->base.port));
I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
dp_to_dig_port(intel_dp)->base.base.name);
}
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
@ -2628,8 +2679,9 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if (!edp_have_panel_vdd(intel_dp))
return;
DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
port_name(intel_dig_port->base.port));
DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD off\n",
intel_dig_port->base.base.base.id,
intel_dig_port->base.base.name);
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@ -2691,8 +2743,9 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
if (!intel_dp_is_edp(intel_dp))
return;
I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
port_name(dp_to_dig_port(intel_dp)->base.port));
I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
dp_to_dig_port(intel_dp)->base.base.base.id,
dp_to_dig_port(intel_dp)->base.base.name);
intel_dp->want_panel_vdd = false;
@ -2713,12 +2766,14 @@ static void edp_panel_on(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
port_name(dp_to_dig_port(intel_dp)->base.port));
DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
dp_to_dig_port(intel_dp)->base.base.name);
if (WARN(edp_have_panel_power(intel_dp),
"eDP port %c panel power already on\n",
port_name(dp_to_dig_port(intel_dp)->base.port)))
"[ENCODER:%d:%s] panel power already on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
dp_to_dig_port(intel_dp)->base.base.name))
return;
wait_panel_power_cycle(intel_dp);
@ -2773,11 +2828,11 @@ static void edp_panel_off(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
port_name(dig_port->base.port));
DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power off\n",
dig_port->base.base.base.id, dig_port->base.base.name);
WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
port_name(dig_port->base.port));
WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n",
dig_port->base.base.base.id, dig_port->base.base.name);
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@ -2922,8 +2977,8 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
I915_STATE_WARN(cur_state != state,
"DP port %c state assertion failure (expected %s, current %s)\n",
port_name(dig_port->base.port),
"[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
dig_port->base.base.base.id, dig_port->base.base.name,
onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
@ -3311,7 +3366,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
dp_train_pat & train_pat_mask);
if (HAS_DDI(dev_priv)) {
u32 temp = I915_READ(DP_TP_CTL(port));
u32 temp = I915_READ(intel_dp->regs.dp_tp_ctl);
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@ -3337,7 +3392,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
break;
}
I915_WRITE(DP_TP_CTL(port), temp);
I915_WRITE(intel_dp->regs.dp_tp_ctl, temp);
} else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
@ -3501,8 +3556,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
* port select always when logically disconnecting a power sequencer
* from a port.
*/
DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
pipe_name(pipe), port_name(intel_dig_port->base.port));
DRM_DEBUG_KMS("detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
pipe_name(pipe), intel_dig_port->base.base.base.id,
intel_dig_port->base.base.name);
I915_WRITE(pp_on_reg, 0);
POSTING_READ(pp_on_reg);
@ -3518,17 +3574,18 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = encoder->port;
WARN(intel_dp->active_pipe == pipe,
"stealing pipe %c power sequencer from active (e)DP port %c\n",
pipe_name(pipe), port_name(port));
"stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
pipe_name(pipe), encoder->base.base.id,
encoder->base.name);
if (intel_dp->pps_pipe != pipe)
continue;
DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
pipe_name(pipe), port_name(port));
DRM_DEBUG_KMS("stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
pipe_name(pipe), encoder->base.base.id,
encoder->base.name);
/* make sure vdd is off before we steal it */
vlv_detach_power_sequencer(intel_dp);
@ -3570,8 +3627,9 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
/* now it's all ours */
intel_dp->pps_pipe = crtc->pipe;
DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
DRM_DEBUG_KMS("initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
encoder->base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@ -4035,22 +4093,22 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (!HAS_DDI(dev_priv))
return;
val = I915_READ(DP_TP_CTL(port));
val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
val |= DP_TP_CTL_LINK_TRAIN_IDLE;
I915_WRITE(DP_TP_CTL(port), val);
I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
/*
* On PORT_A we can have only eDP in SST mode. There the only reason
* we need to set idle transmission mode is to work around a HW issue
* where we enable the pipe while not in idle link-training mode.
* Until TGL on PORT_A we can have only eDP in SST mode. There the only
* reason we need to set idle transmission mode is to work around a HW
* issue where we enable the pipe while not in idle link-training mode.
* In this case there is requirement to wait for a minimum number of
* idle patterns to be sent.
*/
if (port == PORT_A)
if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
return;
if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_IDLE_DONE, 1))
DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
@ -4392,9 +4450,10 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
&dp_to_dig_port(intel_dp)->base;
bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
port_name(encoder->port), yesno(intel_dp->can_mst),
yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support? port: %s, sink: %s, modparam: %s\n",
encoder->base.base.id, encoder->base.name,
yesno(intel_dp->can_mst), yesno(sink_can_mst),
yesno(i915_modparams.enable_dp_mst));
if (!intel_dp->can_mst)
return;
@ -6270,13 +6329,15 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
* would end up in an endless cycle of
* "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
*/
DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
port_name(intel_dig_port->base.port));
DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n",
intel_dig_port->base.base.base.id,
intel_dig_port->base.base.name);
return IRQ_HANDLED;
}
DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
port_name(intel_dig_port->base.port),
DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n",
intel_dig_port->base.base.base.id,
intel_dig_port->base.base.name,
long_hpd ? "long" : "short");
if (long_hpd) {
@ -7140,8 +7201,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_modeset_retry_work_fn);
if (WARN(intel_dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on port %c\n",
intel_dig_port->max_lanes, port_name(port)))
"Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
intel_dig_port->max_lanes, intel_encoder->base.base.id,
intel_encoder->base.name))
return false;
intel_dp_set_source_rates(intel_dp);
@ -7182,9 +7244,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
port != PORT_B && port != PORT_C))
return false;
DRM_DEBUG_KMS("Adding %s connector on port %c\n",
type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
port_name(port));
DRM_DEBUG_KMS("Adding %s connector on [ENCODER:%d:%s]\n",
type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
intel_encoder->base.base.id, intel_encoder->base.name);
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
@ -7303,11 +7365,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->power_domain = intel_port_to_power_domain(port);
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
intel_encoder->crtc_mask = BIT(PIPE_C);
else
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B);
} else {
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
}
intel_encoder->cloneable = 0;
intel_encoder->port = port;


@ -13,6 +13,7 @@
#include "i915_reg.h"
enum pipe;
enum port;
struct drm_connector_state;
struct drm_encoder;
struct drm_i915_private;


@ -215,7 +215,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
if (ret) {
DRM_ERROR("failed to update payload %d\n", ret);
DRM_DEBUG_KMS("failed to update payload %d\n", ret);
}
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder,
@ -295,7 +295,6 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_dig_port->base.port;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
int ret;
@ -326,8 +325,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
DRM_ERROR("failed to allocate vcpi\n");
intel_dp->active_mst_links++;
temp = I915_READ(DP_TP_STATUS(port));
I915_WRITE(DP_TP_STATUS(port), temp);
temp = I915_READ(intel_dp->regs.dp_tp_status);
I915_WRITE(intel_dp->regs.dp_tp_status, temp);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
@ -342,11 +341,10 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_dig_port->base.port;
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_ACT_SENT, 1))
DRM_ERROR("Timed out waiting for ACT sent\n");
@ -426,6 +424,7 @@ static enum drm_mode_status
intel_dp_mst_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@ -453,7 +452,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
if (mode_rate > max_rate || mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
return MODE_OK;
return intel_mode_valid_max_plane_size(dev_priv, mode);
}
static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
@ -599,6 +598,8 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
struct intel_dp_mst_encoder *intel_mst;
struct intel_encoder *intel_encoder;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe_iter;
intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
@ -615,8 +616,9 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->power_domain = intel_dig_port->base.power_domain;
intel_encoder->port = intel_dig_port->base.port;
intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;
for_each_pipe(dev_priv, pipe_iter)
intel_encoder->crtc_mask |= BIT(pipe_iter);
intel_encoder->compute_config = intel_dp_mst_compute_config;
intel_encoder->disable = intel_mst_disable_dp;


@ -2520,6 +2520,18 @@ static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
.dco_integer = 0x54, .dco_fraction = 0x3000,
/* the following params are unused */
.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
.dco_integer = 0x43, .dco_fraction = 0x4000,
/* the following params are unused */
.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};
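/*
 * Sanity check of the two tables above (an illustrative calculation,
 * assuming the usual DCO = refclk * (dco_integer + dco_fraction / 2^15)
 * relationship for skl_wrpll_params):
 *	19200 * (0x54 + 0x3000 / 32768) = 1620000 kHz
 *	24000 * (0x43 + 0x4000 / 32768) = 1620000 kHz
 * i.e. both reference clocks program the same DCO frequency.
 */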
static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
@ -2547,8 +2559,34 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
*pll_params = dev_priv->cdclk.hw.ref == 24000 ?
icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
if (INTEL_GEN(dev_priv) >= 12) {
switch (dev_priv->cdclk.hw.ref) {
default:
MISSING_CASE(dev_priv->cdclk.hw.ref);
/* fall-through */
case 19200:
case 38400:
*pll_params = tgl_tbt_pll_19_2MHz_values;
break;
case 24000:
*pll_params = tgl_tbt_pll_24MHz_values;
break;
}
} else {
switch (dev_priv->cdclk.hw.ref) {
default:
MISSING_CASE(dev_priv->cdclk.hw.ref);
/* fall-through */
case 19200:
case 38400:
*pll_params = icl_tbt_pll_19_2MHz_values;
break;
case 24000:
*pll_params = icl_tbt_pll_24MHz_values;
break;
}
}
return true;
}
@ -2607,7 +2645,8 @@ enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
u32 *target_dco_khz,
struct intel_dpll_hw_state *state)
struct intel_dpll_hw_state *state,
bool is_dkl)
{
u32 dco_min_freq, dco_max_freq;
int div1_vals[] = {7, 5, 3, 2};
@ -2629,8 +2668,13 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
continue;
if (div2 >= 2) {
/*
* Note: a_divratio does not match the TGL BSpec
* algorithm, but matches the hardcoded values and
* works on HW, at least for DP alt-mode
*/
a_divratio = is_dp ? 10 : 5;
tlinedrv = 2;
tlinedrv = is_dkl ? 1 : 2;
} else {
a_divratio = 5;
tlinedrv = 0;
@ -2693,11 +2737,12 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
u64 tmp;
bool use_ssc = false;
bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
bool is_dkl = INTEL_GEN(dev_priv) >= 12;
memset(pll_state, 0, sizeof(*pll_state));
if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
pll_state)) {
pll_state, is_dkl)) {
DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
return false;
}
@ -2705,8 +2750,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
m1div = 2;
m2div_int = dco_khz / (refclk_khz * m1div);
if (m2div_int > 255) {
m1div = 4;
m2div_int = dco_khz / (refclk_khz * m1div);
if (!is_dkl) {
m1div = 4;
m2div_int = dco_khz / (refclk_khz * m1div);
}
if (m2div_int > 255) {
DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
clock);
@ -2786,60 +2834,94 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
ssc_steplog = 4;
pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
MG_PLL_DIV0_FBDIV_INT(m2div_int);
/* write pll_state calculations */
if (is_dkl) {
pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
DKL_PLL_DIV0_FBPREDIV(m1div) |
DKL_PLL_DIV0_FBDIV_INT(m2div_int);
pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
MG_PLL_DIV1_DITHER_DIV_2 |
MG_PLL_DIV1_NDIVRATIO(1) |
MG_PLL_DIV1_FBPREDIV(m1div);
pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
MG_PLL_LF_AFCCNTSEL_512 |
MG_PLL_LF_GAINCTRL(1) |
MG_PLL_LF_INT_COEFF(int_coeff) |
MG_PLL_LF_PROP_COEFF(prop_coeff);
pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
(use_ssc ? DKL_PLL_SSC_EN : 0);
pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
MG_PLL_FRAC_LOCK_DCODITHEREN |
MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
if (use_ssc || m2div_rem > 0)
pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
MG_PLL_SSC_TYPE(2) |
MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
MG_PLL_SSC_STEPNUM(ssc_steplog) |
MG_PLL_SSC_FLLEN |
MG_PLL_SSC_STEPSIZE(ssc_stepsize);
pll_state->mg_pll_tdc_coldst_bias =
DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
MG_PLL_TDC_COLDST_IREFINT_EN |
MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
MG_PLL_TDC_TDCOVCCORR_EN |
MG_PLL_TDC_TDCSEL(3);
pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
MG_PLL_BIAS_BIAS_BONUS(10) |
MG_PLL_BIAS_BIASCAL_EN |
MG_PLL_BIAS_CTRIM(12) |
MG_PLL_BIAS_VREF_RDAC(4) |
MG_PLL_BIAS_IREFTRIM(iref_trim);
if (refclk_khz == 38400) {
pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
pll_state->mg_pll_bias_mask = 0;
} else {
pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
pll_state->mg_pll_bias_mask = -1U;
}
pll_state->mg_pll_div0 =
(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
MG_PLL_DIV0_FBDIV_INT(m2div_int);
pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
pll_state->mg_pll_div1 =
MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
MG_PLL_DIV1_DITHER_DIV_2 |
MG_PLL_DIV1_NDIVRATIO(1) |
MG_PLL_DIV1_FBPREDIV(m1div);
pll_state->mg_pll_lf =
MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
MG_PLL_LF_AFCCNTSEL_512 |
MG_PLL_LF_GAINCTRL(1) |
MG_PLL_LF_INT_COEFF(int_coeff) |
MG_PLL_LF_PROP_COEFF(prop_coeff);
pll_state->mg_pll_frac_lock =
MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
MG_PLL_FRAC_LOCK_DCODITHEREN |
MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
if (use_ssc || m2div_rem > 0)
pll_state->mg_pll_frac_lock |=
MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
pll_state->mg_pll_ssc =
(use_ssc ? MG_PLL_SSC_EN : 0) |
MG_PLL_SSC_TYPE(2) |
MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
MG_PLL_SSC_STEPNUM(ssc_steplog) |
MG_PLL_SSC_FLLEN |
MG_PLL_SSC_STEPSIZE(ssc_stepsize);
pll_state->mg_pll_tdc_coldst_bias =
MG_PLL_TDC_COLDST_COLDSTART |
MG_PLL_TDC_COLDST_IREFINT_EN |
MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
MG_PLL_TDC_TDCOVCCORR_EN |
MG_PLL_TDC_TDCSEL(3);
pll_state->mg_pll_bias =
MG_PLL_BIAS_BIAS_GB_SEL(3) |
MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
MG_PLL_BIAS_BIAS_BONUS(10) |
MG_PLL_BIAS_BIASCAL_EN |
MG_PLL_BIAS_CTRIM(12) |
MG_PLL_BIAS_VREF_RDAC(4) |
MG_PLL_BIAS_IREFTRIM(iref_trim);
if (refclk_khz == 38400) {
pll_state->mg_pll_tdc_coldst_bias_mask =
MG_PLL_TDC_COLDST_COLDSTART;
pll_state->mg_pll_bias_mask = 0;
} else {
pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
pll_state->mg_pll_bias_mask = -1U;
}
pll_state->mg_pll_tdc_coldst_bias &=
pll_state->mg_pll_tdc_coldst_bias_mask;
pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
}
return true;
}
@ -2910,8 +2992,8 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
has_dpll4 ? DPLL_ID_EHL_DPLL4
: DPLL_ID_ICL_DPLL1);
if (!port_dpll->pll) {
DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n",
port_name(encoder->port));
DRM_DEBUG_KMS("No combo PHY PLL found for [ENCODER:%d:%s]\n",
encoder->base.base.id, encoder->base.name);
return false;
}
@ -3086,6 +3168,78 @@ out:
return ret;
}
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
enum tc_port tc_port = icl_pll_id_to_tc_port(id);
intel_wakeref_t wakeref;
bool ret = false;
u32 val;
wakeref = intel_display_power_get_if_enabled(dev_priv,
POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
val = I915_READ(MG_PLL_ENABLE(tc_port));
if (!(val & PLL_ENABLE))
goto out;
/*
* All registers read here have the same HIP_INDEX_REG even though
* they are on different building blocks
*/
I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
hw_state->mg_refclkin_ctl = I915_READ(DKL_REFCLKIN_CTL(tc_port));
hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
hw_state->mg_clktop2_hsclkctl =
I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
hw_state->mg_clktop2_hsclkctl &=
MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
hw_state->mg_clktop2_coreclkctl1 =
I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
hw_state->mg_clktop2_coreclkctl1 &=
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
hw_state->mg_pll_div0 = I915_READ(DKL_PLL_DIV0(tc_port));
hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
DKL_PLL_DIV0_PROP_COEFF_MASK |
DKL_PLL_DIV0_FBPREDIV_MASK |
DKL_PLL_DIV0_FBDIV_INT_MASK);
hw_state->mg_pll_div1 = I915_READ(DKL_PLL_DIV1(tc_port));
hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
hw_state->mg_pll_ssc = I915_READ(DKL_PLL_SSC(tc_port));
hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
DKL_PLL_SSC_STEP_LEN_MASK |
DKL_PLL_SSC_STEP_NUM_MASK |
DKL_PLL_SSC_EN);
hw_state->mg_pll_bias = I915_READ(DKL_PLL_BIAS(tc_port));
hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
DKL_PLL_BIAS_FBDIV_FRAC_MASK);
hw_state->mg_pll_tdc_coldst_bias =
I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
ret = true;
out:
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state,
@ -3220,6 +3374,75 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
}
static void dkl_pll_write(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
u32 val;
/*
* All registers programmed here have the same HIP_INDEX_REG even
* though they are on different building blocks
*/
I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
/* All the registers are RMW */
val = I915_READ(DKL_REFCLKIN_CTL(tc_port));
val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
val |= hw_state->mg_refclkin_ctl;
I915_WRITE(DKL_REFCLKIN_CTL(tc_port), val);
val = I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
val |= hw_state->mg_clktop2_coreclkctl1;
I915_WRITE(DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
val = I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
val |= hw_state->mg_clktop2_hsclkctl;
I915_WRITE(DKL_CLKTOP2_HSCLKCTL(tc_port), val);
val = I915_READ(DKL_PLL_DIV0(tc_port));
val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
DKL_PLL_DIV0_PROP_COEFF_MASK |
DKL_PLL_DIV0_FBPREDIV_MASK |
DKL_PLL_DIV0_FBDIV_INT_MASK);
val |= hw_state->mg_pll_div0;
I915_WRITE(DKL_PLL_DIV0(tc_port), val);
val = I915_READ(DKL_PLL_DIV1(tc_port));
val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
val |= hw_state->mg_pll_div1;
I915_WRITE(DKL_PLL_DIV1(tc_port), val);
val = I915_READ(DKL_PLL_SSC(tc_port));
val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
DKL_PLL_SSC_STEP_LEN_MASK |
DKL_PLL_SSC_STEP_NUM_MASK |
DKL_PLL_SSC_EN);
val |= hw_state->mg_pll_ssc;
I915_WRITE(DKL_PLL_SSC(tc_port), val);
val = I915_READ(DKL_PLL_BIAS(tc_port));
val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
DKL_PLL_BIAS_FBDIV_FRAC_MASK);
val |= hw_state->mg_pll_bias;
I915_WRITE(DKL_PLL_BIAS(tc_port), val);
val = I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
val |= hw_state->mg_pll_tdc_coldst_bias;
I915_WRITE(DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
POSTING_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
@ -3312,7 +3535,10 @@ static void mg_pll_enable(struct drm_i915_private *dev_priv,
icl_pll_power_enable(dev_priv, pll, enable_reg);
icl_mg_pll_write(dev_priv, pll);
if (INTEL_GEN(dev_priv) >= 12)
dkl_pll_write(dev_priv, pll);
else
icl_mg_pll_write(dev_priv, pll);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
@ -3467,11 +3693,22 @@ static const struct intel_dpll_mgr ehl_pll_mgr = {
.dump_hw_state = icl_dump_hw_state,
};
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
.enable = mg_pll_enable,
.disable = mg_pll_disable,
.get_hw_state = dkl_pll_get_hw_state,
};
static const struct dpll_info tgl_plls[] = {
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
{ "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
/* TODO: Add typeC plls */
{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
{ },
};
@ -3479,6 +3716,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
.dpll_info = tgl_plls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
.dump_hw_state = icl_dump_hw_state,
};


@ -0,0 +1,332 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*
*/
#include "i915_drv.h"
#include "intel_display_types.h"
#define DSB_BUF_SIZE (2 * PAGE_SIZE)
/**
* DOC: DSB
*
* A DSB (Display State Buffer) is a queue of MMIO instructions in memory
* which can be offloaded to the DSB HW in the Display Controller. The DSB
* HW is a DMA engine that can be programmed to download the DSB from
* memory. It allows the driver to batch-submit display HW programming.
* This helps to reduce loading time and CPU activity, thereby making
* context switches faster. DSB support is available from Gen12 Intel
* graphics platforms onwards.
*
* DSBs can access only the pipe, plane, and transcoder Data Island Packet
* registers.
*
* The DSB HW can support only register writes (both indexed and direct
* MMIO writes). No register reads are possible with the DSB HW engine.
*/
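/*
 * A minimal usage sketch (illustrative only; SOME_PIPE_REG is a
 * hypothetical register and error handling is elided):
 *
 *	struct intel_dsb *dsb = intel_dsb_get(crtc);
 *
 *	intel_dsb_reg_write(dsb, SOME_PIPE_REG(crtc->pipe), val);
 *	intel_dsb_commit(dsb);
 *	intel_dsb_put(dsb);
 *
 * intel_dsb_get() degrades to pure MMIO writes when DSB is unsupported or
 * buffer allocation fails, so the same call sequence works either way.
 */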
/* DSB opcodes. */
#define DSB_OPCODE_SHIFT 24
#define DSB_OPCODE_MMIO_WRITE 0x1
#define DSB_OPCODE_INDEXED_WRITE 0x9
#define DSB_BYTE_EN 0xF
#define DSB_BYTE_EN_SHIFT 20
#define DSB_REG_VALUE_MASK 0xfffff
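/*
 * Illustrative encoding, derived from the helpers below rather than from
 * BSpec: a direct MMIO write of 'val' to register offset 'off' occupies
 * the two-dword pair
 *	{ val, (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
 *	       (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) | off }
 * while an indexed write is a size dword followed by
 *	{ (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) | off, val, ... }
 */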
static inline bool is_dsb_busy(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
return DSB_STATUS & I915_READ(DSB_CTRL(pipe, dsb->id));
}
static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 dsb_ctrl;
dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
if (DSB_STATUS & dsb_ctrl) {
DRM_DEBUG_KMS("DSB engine is busy.\n");
return false;
}
dsb_ctrl |= DSB_ENABLE;
I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
POSTING_READ(DSB_CTRL(pipe, dsb->id));
return true;
}
static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 dsb_ctrl;
dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
if (DSB_STATUS & dsb_ctrl) {
DRM_DEBUG_KMS("DSB engine is busy.\n");
return false;
}
dsb_ctrl &= ~DSB_ENABLE;
I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
POSTING_READ(DSB_CTRL(pipe, dsb->id));
return true;
}
/**
* intel_dsb_get() - Allocate DSB context and return a DSB instance.
* @crtc: intel_crtc structure to get pipe info.
*
* This function provides the handle of a DSB instance, for further DSB
* operations.
*
* Returns: address of the intel_dsb instance requested.
* Failure: Returns the same DSB instance, but without a command buffer.
*/
struct intel_dsb *
intel_dsb_get(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *i915 = to_i915(dev);
struct intel_dsb *dsb = &crtc->dsb;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
intel_wakeref_t wakeref;
if (!HAS_DSB(i915))
return dsb;
if (atomic_add_return(1, &dsb->refcount) != 1)
return dsb;
dsb->id = DSB1;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
if (IS_ERR(obj)) {
DRM_ERROR("Gem object creation failed\n");
goto err;
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma)) {
DRM_ERROR("Vma creation failed\n");
i915_gem_object_put(obj);
atomic_dec(&dsb->refcount);
goto err;
}
dsb->cmd_buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
if (IS_ERR(dsb->cmd_buf)) {
DRM_ERROR("Command buffer creation failed\n");
i915_vma_unpin_and_release(&vma, 0);
dsb->cmd_buf = NULL;
atomic_dec(&dsb->refcount);
goto err;
}
dsb->vma = vma;
err:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return dsb;
}
/**
* intel_dsb_put() - Destroy the DSB context.
* @dsb: intel_dsb structure.
*
* This function destroys the DSB context allocated by intel_dsb_get(), by
* unpinning and releasing the VMA object associated with it.
*/
void intel_dsb_put(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (!HAS_DSB(i915))
return;
if (WARN_ON(atomic_read(&dsb->refcount) == 0))
return;
if (atomic_dec_and_test(&dsb->refcount)) {
i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
dsb->cmd_buf = NULL;
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
}
}
/**
* intel_dsb_indexed_reg_write() - Write to the DSB context for an
* auto-increment register.
* @dsb: intel_dsb structure.
* @reg: register address.
* @val: value.
*
* This function is used for writing a register-value pair into the DSB
* command buffer for an auto-increment register. On command buffer
* overflow, a warning is emitted and, as for any other error condition,
* the register programming falls back to an MMIO write.
*/
void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
u32 val)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 *buf = dsb->cmd_buf;
u32 reg_val;
if (!buf) {
I915_WRITE(reg, val);
return;
}
if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
DRM_DEBUG_KMS("DSB buffer overflow\n");
return;
}
/*
* For example, the buffer will look like below when 3 dwords are written
* to an auto-increment register:
* +--------------------------------------------------------+
* | size = 3 | offset &| value1 | value2 | value3 | zero |
* | | opcode | | | | |
* +--------------------------------------------------------+
* + + + + + + +
* 0 4 8 12 16 20 24
* Byte
*
* As every instruction is 8-byte aligned, the index of a DSB instruction
* always starts at an even number when dealing with the u32 array. If an
* odd number of dwords is written, zeros are appended at the end as
* padding.
*/
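/*
 * Concretely (an illustrative sketch; the register offset is made up):
 * three consecutive indexed writes to a register at MMIO offset 0x6880
 * leave the u32 buffer as
 *	{ 3, (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) | 0x6880,
 *	  val1, val2, val3, 0 }
 * i.e. size, opcode | offset, the three values, and one zero pad dword so
 * that the next instruction starts 8-byte aligned.
 */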
reg_val = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK;
if (reg_val != i915_mmio_reg_offset(reg)) {
/* Every instruction should be 8 byte aligned. */
dsb->free_pos = ALIGN(dsb->free_pos, 2);
dsb->ins_start_offset = dsb->free_pos;
/* Update the size. */
buf[dsb->free_pos++] = 1;
/* Update the opcode and reg. */
buf[dsb->free_pos++] = (DSB_OPCODE_INDEXED_WRITE <<
DSB_OPCODE_SHIFT) |
i915_mmio_reg_offset(reg);
/* Update the value. */
buf[dsb->free_pos++] = val;
} else {
/* Update the new value. */
buf[dsb->free_pos++] = val;
/* Update the size. */
buf[dsb->ins_start_offset]++;
}
/* If the number of data words is odd, then the last dword should be 0. */
if (dsb->free_pos & 0x1)
buf[dsb->free_pos] = 0;
}
/**
* intel_dsb_reg_write() - Write to the DSB context for a normal
* register.
* @dsb: intel_dsb structure.
* @reg: register address.
* @val: value.
*
* This function is used for writing a register-value pair into the DSB
* command buffer. On command buffer overflow, a warning is emitted and,
* as for any other error condition, the register programming falls back
* to an MMIO write.
*/
void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 *buf = dsb->cmd_buf;
if (!buf) {
I915_WRITE(reg, val);
return;
}
if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
DRM_DEBUG_KMS("DSB buffer overflow\n");
return;
}
dsb->ins_start_offset = dsb->free_pos;
buf[dsb->free_pos++] = val;
buf[dsb->free_pos++] = (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
(DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
i915_mmio_reg_offset(reg);
}
/**
* intel_dsb_commit() - Trigger workload execution of DSB.
* @dsb: intel_dsb structure.
*
* This function does the actual write to the hardware using the DSB,
* falling back to MMIO on errors. It also resets the DSB context.
*/
void intel_dsb_commit(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
u32 tail;
if (!dsb->free_pos)
return;
if (!intel_dsb_enable_engine(dsb))
goto reset;
if (is_dsb_busy(dsb)) {
DRM_ERROR("HEAD_PTR write failed - dsb engine is busy.\n");
goto reset;
}
I915_WRITE(DSB_HEAD(pipe, dsb->id), i915_ggtt_offset(dsb->vma));
tail = ALIGN(dsb->free_pos * 4, CACHELINE_BYTES);
if (tail > dsb->free_pos * 4)
memset(&dsb->cmd_buf[dsb->free_pos], 0,
(tail - dsb->free_pos * 4));
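/*
 * Worked example (assuming CACHELINE_BYTES == 64): with 6 dwords
 * queued, free_pos * 4 == 24, so tail is rounded up to 64 and the
 * remaining 40 bytes up to the tail pointer are zero-padded.
 */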
if (is_dsb_busy(dsb)) {
DRM_ERROR("TAIL_PTR write failed - dsb engine is busy.\n");
goto reset;
}
DRM_DEBUG_KMS("DSB execution started - head 0x%x, tail 0x%x\n",
i915_ggtt_offset(dsb->vma), tail);
I915_WRITE(DSB_TAIL(pipe, dsb->id), i915_ggtt_offset(dsb->vma) + tail);
if (wait_for(!is_dsb_busy(dsb), 1)) {
DRM_ERROR("Timed out waiting for DSB workload completion.\n");
goto reset;
}
reset:
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
intel_dsb_disable_engine(dsb);
}


@ -0,0 +1,52 @@
/* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#ifndef _INTEL_DSB_H
#define _INTEL_DSB_H
#include <linux/types.h>
#include "i915_reg.h"
struct intel_crtc;
struct i915_vma;
enum dsb_id {
INVALID_DSB = -1,
DSB1,
DSB2,
DSB3,
MAX_DSB_PER_PIPE
};
struct intel_dsb {
atomic_t refcount;
enum dsb_id id;
u32 *cmd_buf;
struct i915_vma *vma;
/*
* free_pos points at the first free entry position and helps in
* calculating the tail of the command buffer.
*/
int free_pos;
/*
* ins_start_offset stores the start address of the current dsb
* instruction and helps in identifying the batch of an auto-increment
* register.
*/
u32 ins_start_offset;
};
struct intel_dsb *
intel_dsb_get(struct intel_crtc *crtc);
void intel_dsb_put(struct intel_dsb *dsb);
void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val);
void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
u32 val);
void intel_dsb_commit(struct intel_dsb *dsb);
#endif


@ -55,6 +55,7 @@ int intel_dsi_get_modes(struct drm_connector *connector)
enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@ -73,7 +74,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
}
return MODE_OK;
return intel_mode_valid_max_plane_size(dev_priv, mode);
}
struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,


@ -280,7 +280,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
int pipe = crtc->pipe;
enum pipe pipe = crtc->pipe;
u32 dvo_val;
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;
@ -505,7 +505,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
intel_encoder->type = INTEL_OUTPUT_DVO;
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B);
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:


@ -343,8 +343,8 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
HSW_FBCQ_DIS);
}
if (IS_GEN(dev_priv, 11))
/* Wa_1409120013:icl,ehl */
if (INTEL_GEN(dev_priv) >= 11)
/* Wa_1409120013:icl,ehl,tgl */
I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@ -1320,6 +1320,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
fbc->enabled = false;
fbc->active = false;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
mkwrite_device_info(dev_priv)->display.has_fbc = false;
if (need_fbc_vtd_wa(dev_priv))
mkwrite_device_info(dev_priv)->display.has_fbc = false;


@ -141,10 +141,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
obj = NULL;
obj = ERR_PTR(-ENODEV);
if (size * 2 < dev_priv->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (obj == NULL)
if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
@ -204,7 +204,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
sizes->fb_height = intel_fb->base.height;
}
mutex_lock(&dev->struct_mutex);
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
/* Pin the GGTT vma for our access via info->screen_base.
@ -266,7 +265,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
ifbdev->vma_flags = flags;
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(pdev, info);
return 0;
@ -274,7 +272,6 @@ out_unpin:
intel_unpin_fb_vma(vma, flags);
out_unlock:
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev->struct_mutex);
return ret;
}
@ -291,11 +288,8 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
drm_fb_helper_fini(&ifbdev->helper);
if (ifbdev->vma) {
mutex_lock(&ifbdev->helper.dev->struct_mutex);
if (ifbdev->vma)
intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
mutex_unlock(&ifbdev->helper.dev->struct_mutex);
}
if (ifbdev->fb)
drm_framebuffer_remove(&ifbdev->fb->base);
@ -444,7 +438,7 @@ int intel_fbdev_init(struct drm_device *dev)
struct intel_fbdev *ifbdev;
int ret;
if (WARN_ON(!HAS_DISPLAY(dev_priv)))
if (WARN_ON(!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);


@ -206,6 +206,7 @@ static int frontbuffer_active(struct i915_active *ref)
return 0;
}
__i915_active_call
static void frontbuffer_retire(struct i915_active *ref)
{
struct intel_frontbuffer *front =
@ -220,11 +221,18 @@ static void frontbuffer_release(struct kref *ref)
{
struct intel_frontbuffer *front =
container_of(ref, typeof(*front), ref);
struct drm_i915_gem_object *obj = front->obj;
struct i915_vma *vma;
front->obj->frontbuffer = NULL;
spin_unlock(&to_i915(front->obj->base.dev)->fb_tracking.lock);
spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj)
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
spin_unlock(&obj->vma.lock);
i915_gem_object_put(front->obj);
obj->frontbuffer = NULL;
spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock);
i915_gem_object_put(obj);
kfree(front);
}
@ -249,8 +257,9 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
front->obj = obj;
kref_init(&front->ref);
atomic_set(&front->bits, 0);
i915_active_init(i915, &front->write,
frontbuffer_active, frontbuffer_retire);
i915_active_init(&front->write,
frontbuffer_active,
i915_active_may_sleep(frontbuffer_retire));
spin_lock(&i915->fb_tracking.lock);
if (obj->frontbuffer) {


@ -836,7 +836,7 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
unsigned int pin;
int ret;
if (!HAS_DISPLAY(dev_priv))
if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
return 0;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))


@ -1,9 +1,11 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2017 Google, Inc.
* Copyright © 2017-2019, Intel Corporation.
*
* Authors:
* Sean Paul <seanpaul@chromium.org>
* Ramalingam C <ramalingam.c@intel.com>
*/
#include <linux/component.h>
@ -18,6 +20,7 @@
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_sideband.h"
#include "intel_connector.h"
#define KEY_LOAD_TRIES 5
#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
@ -105,24 +108,20 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
return capable;
}
static inline bool intel_hdcp_in_use(struct intel_connector *connector)
static inline
bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum port port = connector->encoder->port;
u32 reg;
reg = I915_READ(PORT_HDCP_STATUS(port));
return reg & HDCP_STATUS_ENC;
return I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
HDCP_STATUS_ENC;
}
static inline bool intel_hdcp2_in_use(struct intel_connector *connector)
static inline
bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum port port = connector->encoder->port;
u32 reg;
reg = I915_READ(HDCP2_STATUS_DDI(port));
return reg & LINK_ENCRYPTION_STATUS;
return I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS;
}
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
@ -253,9 +252,29 @@ static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
}
static
u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
{
enum port port = intel_dig_port->base.port;
if (INTEL_GEN(dev_priv) >= 12) {
switch (cpu_transcoder) {
case TRANSCODER_A:
return HDCP_TRANSA_REP_PRESENT |
HDCP_TRANSA_SHA1_M0;
case TRANSCODER_B:
return HDCP_TRANSB_REP_PRESENT |
HDCP_TRANSB_SHA1_M0;
case TRANSCODER_C:
return HDCP_TRANSC_REP_PRESENT |
HDCP_TRANSC_SHA1_M0;
case TRANSCODER_D:
return HDCP_TRANSD_REP_PRESENT |
HDCP_TRANSD_SHA1_M0;
default:
DRM_ERROR("Unknown transcoder %d\n", cpu_transcoder);
return -EINVAL;
}
}
switch (port) {
case PORT_A:
return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
@ -268,18 +287,20 @@ u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
case PORT_E:
return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
default:
break;
DRM_ERROR("Unknown port %d\n", port);
return -EINVAL;
}
DRM_ERROR("Unknown port %d\n", port);
return -EINVAL;
}
static
int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
const struct intel_hdcp_shim *shim,
u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
struct drm_i915_private *dev_priv;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
enum port port = intel_dig_port->base.port;
u32 vprime, sha_text, sha_leftovers, rep_ctl;
int ret, i, j, sha_idx;
@ -306,7 +327,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
sha_idx = 0;
sha_text = 0;
sha_leftovers = 0;
rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
for (i = 0; i < num_downstream; i++) {
unsigned int sha_empty;
@ -548,7 +569,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
* V prime at least twice.
*/
for (i = 0; i < tries; i++) {
ret = intel_hdcp_validate_v_prime(intel_dig_port, shim,
ret = intel_hdcp_validate_v_prime(connector, shim,
ksv_fifo, num_downstream,
bstatus);
if (!ret)
@ -576,6 +597,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
struct drm_device *dev = connector->base.dev;
const struct intel_hdcp_shim *shim = hdcp->shim;
struct drm_i915_private *dev_priv;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
enum port port;
unsigned long r0_prime_gen_start;
int ret, i, tries = 2;
@ -615,18 +637,21 @@ static int intel_hdcp_auth(struct intel_connector *connector)
/* Initialize An with 2 random values and acquire it */
for (i = 0; i < 2; i++)
I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
I915_WRITE(HDCP_ANINIT(dev_priv, cpu_transcoder, port),
get_random_u32());
I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
if (intel_de_wait_for_set(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_AN_READY, 1)) {
DRM_ERROR("Timed out waiting for An\n");
return -ETIMEDOUT;
}
an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
an.reg[0] = I915_READ(HDCP_ANLO(dev_priv, cpu_transcoder, port));
an.reg[1] = I915_READ(HDCP_ANHI(dev_priv, cpu_transcoder, port));
ret = shim->write_an_aksv(intel_dig_port, an.shim);
if (ret)
return ret;
@ -644,24 +669,26 @@ static int intel_hdcp_auth(struct intel_connector *connector)
return -EPERM;
}
I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
I915_WRITE(HDCP_BKSVLO(dev_priv, cpu_transcoder, port), bksv.reg[0]);
I915_WRITE(HDCP_BKSVHI(dev_priv, cpu_transcoder, port), bksv.reg[1]);
ret = shim->repeater_present(intel_dig_port, &repeater_present);
if (ret)
return ret;
if (repeater_present)
I915_WRITE(HDCP_REP_CTL,
intel_hdcp_get_repeater_ctl(intel_dig_port));
intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
port));
ret = shim->toggle_signalling(intel_dig_port, true);
if (ret)
return ret;
I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);
I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
HDCP_CONF_AUTH_AND_ENC);
/* Wait for R0 ready */
if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
(HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
DRM_ERROR("Timed out waiting for R0 ready\n");
return -ETIMEDOUT;
@ -689,22 +716,25 @@ static int intel_hdcp_auth(struct intel_connector *connector)
ret = shim->read_ri_prime(intel_dig_port, ri.shim);
if (ret)
return ret;
I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
if (!wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
break;
}
if (i == tries) {
DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
I915_READ(PORT_HDCP_STATUS(port)));
I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
port)));
return -ETIMEDOUT;
}
/* Wait for encryption confirmation */
if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
if (intel_de_wait_for_set(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_ENC,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Timed out waiting for encryption\n");
@ -729,15 +759,17 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
connector->base.name, connector->base.base.id);
hdcp->hdcp_encrypted = false;
I915_WRITE(PORT_HDCP_CONF(port), 0);
if (intel_de_wait_for_clear(dev_priv, PORT_HDCP_STATUS(port), ~0,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
if (intel_de_wait_for_clear(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
}
@ -808,9 +840,11 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder;
int ret = 0;
mutex_lock(&hdcp->mutex);
cpu_transcoder = hdcp->cpu_transcoder;
/* Check_link valid only when HDCP1.4 is enabled */
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
@ -819,10 +853,11 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
if (WARN_ON(!intel_hdcp_in_use(connector))) {
if (WARN_ON(!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
connector->base.name, connector->base.base.id,
I915_READ(PORT_HDCP_STATUS(port)));
I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
port)));
ret = -ENXIO;
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
@ -1493,10 +1528,11 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = connector->encoder->port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS);
WARN_ON(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS);
if (hdcp->shim->toggle_signalling) {
ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
if (ret) {
@ -1506,14 +1542,18 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
}
}
if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) {
if (I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
LINK_AUTH_STATUS) {
/* Link is Authenticated. Now set for Encryption */
I915_WRITE(HDCP2_CTL_DDI(port),
I915_READ(HDCP2_CTL_DDI(port)) |
I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder,
port)) |
CTL_LINK_ENCRYPTION_REQ);
}
ret = intel_de_wait_for_set(dev_priv, HDCP2_STATUS_DDI(port),
ret = intel_de_wait_for_set(dev_priv,
HDCP2_STATUS(dev_priv, cpu_transcoder,
port),
LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
@ -1526,14 +1566,19 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = connector->encoder->port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS));
WARN_ON(!(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS));
I915_WRITE(HDCP2_CTL_DDI(port),
I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder, port)) &
~CTL_LINK_ENCRYPTION_REQ);
ret = intel_de_wait_for_clear(dev_priv, HDCP2_STATUS_DDI(port),
ret = intel_de_wait_for_clear(dev_priv,
HDCP2_STATUS(dev_priv, cpu_transcoder,
port),
LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
@ -1632,9 +1677,11 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = connector->encoder->port;
enum transcoder cpu_transcoder;
int ret = 0;
mutex_lock(&hdcp->mutex);
cpu_transcoder = hdcp->cpu_transcoder;
/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
@ -1643,9 +1690,10 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
if (WARN_ON(!intel_hdcp2_in_use(connector))) {
if (WARN_ON(!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
I915_READ(HDCP2_STATUS_DDI(port)));
I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder,
port)));
ret = -ENXIO;
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
@ -1749,13 +1797,71 @@ static const struct component_ops i915_hdcp_component_ops = {
.unbind = i915_hdcp_component_unbind,
};
static inline
enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
{
switch (port) {
case PORT_A:
return MEI_DDI_A;
case PORT_B ... PORT_F:
return (enum mei_fw_ddi)port;
default:
return MEI_DDI_INVALID_PORT;
}
}
static inline
enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
{
switch (cpu_transcoder) {
case TRANSCODER_A ... TRANSCODER_D:
return (enum mei_fw_tc)(cpu_transcoder | 0x10);
default: /* eDP and DSI transcoders are not HDCP capable */
return MEI_INVALID_TRANSCODER;
}
}
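/*
 * Illustrative mapping (assuming TRANSCODER_A == 0 and a mei_fw_tc
 * encoding starting at 0x10, as the cast above implies): TRANSCODER_A
 * maps to 0x10, TRANSCODER_B to 0x11, and so on up to TRANSCODER_D.
 */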
void intel_hdcp_transcoder_config(struct intel_connector *connector,
enum transcoder cpu_transcoder)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
if (!hdcp->shim)
return;
if (INTEL_GEN(dev_priv) >= 12) {
mutex_lock(&hdcp->mutex);
hdcp->cpu_transcoder = cpu_transcoder;
hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
mutex_unlock(&hdcp->mutex);
}
}
static inline int initialize_hdcp_port_data(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
struct hdcp_port_data *data = &hdcp->port_data;
data->port = connector->encoder->port;
if (INTEL_GEN(dev_priv) < 12)
data->fw_ddi =
intel_get_mei_fw_ddi_index(connector->encoder->port);
else
/*
* As per the ME FW API expectation, for Gen12+, fw_ddi is filled
* with zero (invalid port index).
*/
data->fw_ddi = MEI_DDI_INVALID_PORT;
/*
* As the associated transcoder is set and modified at modeset time,
* fw_tc is initialized here to zero (invalid transcoder index). For
* <Gen12 it is retained forever.
*/
data->fw_tc = MEI_INVALID_TRANSCODER;
data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
data->protocol = (u8)shim->protocol;


@ -15,10 +15,14 @@ struct drm_connector_state;
struct drm_i915_private;
struct intel_connector;
struct intel_hdcp_shim;
enum port;
enum transcoder;
void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state);
void intel_hdcp_transcoder_config(struct intel_connector *connector,
enum transcoder cpu_transcoder);
int intel_hdcp_init(struct intel_connector *connector,
const struct intel_hdcp_shim *hdcp_shim);
int intel_hdcp_enable(struct intel_connector *connector, u8 content_type);


@ -724,11 +724,20 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
drm_hdmi_avi_infoframe_colorspace(frame, conn_state);
drm_hdmi_avi_infoframe_quant_range(frame, connector,
adjusted_mode,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
/* nonsense combination */
WARN_ON(crtc_state->limited_color_range &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
drm_hdmi_avi_infoframe_quant_range(frame, connector,
adjusted_mode,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
} else {
frame->quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
frame->ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
}
drm_hdmi_avi_infoframe_content_type(frame, conn_state);
@ -1491,7 +1500,10 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
{
struct drm_i915_private *dev_priv =
intel_dig_port->base.base.dev->dev_private;
struct intel_connector *connector =
intel_dig_port->hdmi.attached_connector;
enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
int ret;
union {
u32 reg;
@ -1502,13 +1514,14 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
if (ret)
return false;
I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n",
I915_READ(PORT_HDCP_STATUS(port)));
I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
port)));
return false;
}
return true;
@ -2184,8 +2197,10 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
true, force_dvi);
}
if (status != MODE_OK)
return status;
return status;
return intel_mode_valid_max_plane_size(dev_priv, mode);
}
static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
@ -2261,9 +2276,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
static bool
intel_hdmi_ycbcr420_config(struct drm_connector *connector,
struct intel_crtc_state *config,
int *clock_12bpc, int *clock_10bpc,
int *clock_8bpc)
struct intel_crtc_state *config)
{
struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc);
@ -2272,11 +2285,6 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
return false;
}
/* YCBCR420 TMDS rate requirement is half the pixel clock */
config->port_clock /= 2;
*clock_12bpc /= 2;
*clock_10bpc /= 2;
*clock_8bpc /= 2;
config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
/* YCBCR 420 output conversion needs a scaler */
@ -2291,6 +2299,104 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
return true;
}
static int intel_hdmi_port_clock(int clock, int bpc)
{
/*
* Need to adjust the port link by:
* 1.5x for 12bpc
* 1.25x for 10bpc
*/
return clock * bpc / 8;
}
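/*
 * For example (arithmetic only): a 297000 kHz mode needs a
 * 297000 * 12 / 8 = 445500 kHz port clock at 12bpc and a
 * 297000 * 10 / 8 = 371250 kHz port clock at 10bpc.
 */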
static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int clock, bool force_dvi)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
int bpc;
for (bpc = 12; bpc >= 10; bpc -= 2) {
if (hdmi_deep_color_possible(crtc_state, bpc) &&
hdmi_port_clock_valid(intel_hdmi,
intel_hdmi_port_clock(clock, bpc),
true, force_dvi) == MODE_OK)
return bpc;
}
return 8;
}
static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
bool force_dvi)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
int bpc, clock = adjusted_mode->crtc_clock;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
clock *= 2;
/* YCBCR420 TMDS rate requirement is half the pixel clock */
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
clock /= 2;
bpc = intel_hdmi_compute_bpc(encoder, crtc_state,
clock, force_dvi);
crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc);
/*
* pipe_bpp could already be below 8bpc due to
* FDI bandwidth constraints. We shouldn't bump it
* back up to 8bpc in that case.
*/
if (crtc_state->pipe_bpp > bpc * 3)
crtc_state->pipe_bpp = bpc * 3;
DRM_DEBUG_KMS("picking %d bpc for HDMI output (pipe bpp: %d)\n",
bpc, crtc_state->pipe_bpp);
if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock,
false, force_dvi) != MODE_OK) {
DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n",
crtc_state->port_clock);
return -EINVAL;
}
return 0;
}
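A minimal user-space sketch of the clock/bpc selection flow above, with the driver's validity checks stubbed out (the 594000 kHz cap is an assumption standing in for hdmi_port_clock_valid(), and deep-color eligibility is reduced to a constant):

#include <stdbool.h>
#include <stdio.h>

static bool deep_color_possible(int bpc) { return bpc <= 12; } /* stub */
static bool port_clock_valid(int clock) { return clock <= 594000; } /* stub */
static int port_clock(int clock, int bpc) { return clock * bpc / 8; }

int main(void)
{
	int clock = 594000;  /* adjusted_mode crtc_clock, in kHz */
	bool dblclk = false, ycbcr420 = true;
	int bpc;

	if (dblclk)
		clock *= 2;
	if (ycbcr420)
		clock /= 2;  /* 4:2:0 TMDS rate is half the pixel clock */

	/* Probe the deepest color first, exactly like intel_hdmi_compute_bpc(). */
	for (bpc = 12; bpc >= 10; bpc -= 2)
		if (deep_color_possible(bpc) &&
		    port_clock_valid(port_clock(clock, bpc)))
			break;
	if (bpc < 10)
		bpc = 8; /* fallback */

	printf("bpc=%d port_clock=%d kHz\n", bpc, port_clock(clock, bpc));
	return 0;
}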
static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
/*
* Our YCbCr output is always limited range.
* crtc_state->limited_color_range only applies to RGB,
* and it must never be set for YCbCr or we risk setting
* some conflicting bits in PIPECONF which will mess up
* the colors on the monitor.
*/
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
return false;
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
return crtc_state->has_hdmi_sink &&
drm_default_rgb_quant_range(adjusted_mode) ==
HDMI_QUANTIZATION_RANGE_LIMITED;
} else {
return intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
}
}
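The broadcast-RGB decision distilled to its branches, with hypothetical enum values and the CEA quantization-range helper reduced to a boolean (a sketch, not the DRM API):

#include <stdbool.h>
#include <stdio.h>

enum broadcast_rgb { RGB_AUTO, RGB_FULL, RGB_LIMITED }; /* illustrative */

static bool limited_color_range(enum broadcast_rgb prop, bool is_rgb,
				bool has_hdmi_sink, bool cea_mode_is_limited)
{
	if (!is_rgb)		/* YCbCr output is always limited range and  */
		return false;	/* must never set the RGB limited-range flag */
	if (prop == RGB_AUTO)	/* CEA-861-E 5.1 default encoding parameters */
		return has_hdmi_sink && cea_mode_is_limited;
	return prop == RGB_LIMITED;
}

int main(void)
{
	printf("%d\n", limited_color_range(RGB_AUTO, true, true, true)); /* 1 */
	return 0;
}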
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@ -2302,11 +2408,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
int clock_10bpc = clock_8bpc * 5 / 4;
int clock_12bpc = clock_8bpc * 3 / 2;
int desired_bpp;
bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@ -2317,33 +2420,19 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (pipe_config->has_hdmi_sink)
pipe_config->has_infoframe = true;
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
pipe_config->limited_color_range =
pipe_config->has_hdmi_sink &&
drm_default_rgb_quant_range(adjusted_mode) ==
HDMI_QUANTIZATION_RANGE_LIMITED;
} else {
pipe_config->limited_color_range =
intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
}
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
pipe_config->pixel_multiplier = 2;
clock_8bpc *= 2;
clock_10bpc *= 2;
clock_12bpc *= 2;
}
if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
if (!intel_hdmi_ycbcr420_config(connector, pipe_config,
&clock_12bpc, &clock_10bpc,
&clock_8bpc)) {
if (!intel_hdmi_ycbcr420_config(connector, pipe_config)) {
DRM_ERROR("Can't support YCBCR420 output\n");
return -EINVAL;
}
}
pipe_config->limited_color_range =
intel_hdmi_limited_color_range(pipe_config, conn_state);
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
pipe_config->has_pch_encoder = true;
@ -2355,43 +2444,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
intel_conn_state->force_audio == HDMI_AUDIO_ON;
}
/*
* Note that g4x/vlv don't support 12bpc hdmi outputs. We also need
* to check that the higher clock still fits within limits.
*/
if (hdmi_deep_color_possible(pipe_config, 12) &&
hdmi_port_clock_valid(intel_hdmi, clock_12bpc,
true, force_dvi) == MODE_OK) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
/* Need to adjust the port link by 1.5x for 12bpc. */
pipe_config->port_clock = clock_12bpc;
} else if (hdmi_deep_color_possible(pipe_config, 10) &&
hdmi_port_clock_valid(intel_hdmi, clock_10bpc,
true, force_dvi) == MODE_OK) {
DRM_DEBUG_KMS("picking bpc to 10 for HDMI output\n");
desired_bpp = 10 * 3;
/* Need to adjust the port link by 1.25x for 10bpc. */
pipe_config->port_clock = clock_10bpc;
} else {
DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
desired_bpp = 8*3;
pipe_config->port_clock = clock_8bpc;
}
if (!pipe_config->bw_constrained) {
DRM_DEBUG_KMS("forcing pipe bpp to %i for HDMI\n", desired_bpp);
pipe_config->pipe_bpp = desired_bpp;
}
if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
false, force_dvi) != MODE_OK) {
DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
return -EINVAL;
}
ret = intel_hdmi_compute_clock(encoder, pipe_config, force_dvi);
if (ret)
return ret;
/* Set user selected PAR to incoming mode's member */
adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
@ -2431,6 +2486,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
return -EINVAL;
}
intel_hdcp_transcoder_config(intel_hdmi->attached_connector,
pipe_config->cpu_transcoder);
return 0;
}
@ -3001,7 +3059,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
if (HAS_PCH_MCC(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_ICP(dev_priv))
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
@ -3068,12 +3126,13 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_encoder->port;
DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
port_name(port));
DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
intel_encoder->base.base.id, intel_encoder->base.name);
if (WARN(intel_dig_port->max_lanes < 4,
"Not enough lanes (%d) for HDMI on port %c\n",
intel_dig_port->max_lanes, port_name(port)))
"Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
intel_dig_port->max_lanes, intel_encoder->base.base.id,
intel_encoder->base.name))
return;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
@ -3210,11 +3269,11 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder->port = port;
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
intel_encoder->crtc_mask = BIT(PIPE_C);
else
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B);
} else {
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
}
intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
/*

View File

@ -23,6 +23,7 @@ struct intel_crtc_state;
struct intel_hdmi;
struct drm_connector_state;
union hdmi_infoframe;
enum port;
void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
enum port port);

View File

@ -481,7 +481,8 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
long_hpd = long_mask & BIT(pin);
DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
DRM_DEBUG_DRIVER("digital hpd on [ENCODER:%d:%s] - %s\n",
encoder->base.base.id, encoder->base.name,
long_hpd ? "long" : "short");
queue_dig = true;

View File

@ -13,6 +13,7 @@
struct drm_i915_private;
struct intel_connector;
struct intel_encoder;
enum port;
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,

View File

@ -114,7 +114,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
pinfo.size_data = sizeof(*pdata);
pinfo.dma_mask = DMA_BIT_MASK(32);
pdata->num_pipes = INTEL_INFO(dev_priv)->num_pipes;
pdata->num_pipes = INTEL_NUM_PIPES(dev_priv);
pdata->num_ports = IS_CHERRYVIEW(dev_priv) ? 3 : 2; /* B,C,D or B,C */
pdata->port[0].pipe = -1;
pdata->port[1].pipe = -1;

View File

@ -232,7 +232,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int pipe = crtc->pipe;
enum pipe pipe = crtc->pipe;
u32 temp;
if (HAS_PCH_SPLIT(dev_priv)) {
@ -900,11 +900,11 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
intel_encoder->port = PORT_NONE;
intel_encoder->cloneable = 0;
if (HAS_PCH_SPLIT(dev_priv))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
else if (IS_GEN(dev_priv, 4))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B);
else
intel_encoder->crtc_mask = (1 << 1);
intel_encoder->crtc_mask = BIT(PIPE_B);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;

View File

@ -230,7 +230,7 @@ alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
if (IS_ERR(rq))
return rq;
err = i915_active_ref(&overlay->last_flip, rq->timeline, rq);
err = i915_active_add_request(&overlay->last_flip, rq);
if (err) {
i915_request_add(rq);
return ERR_PTR(err);
@ -439,8 +439,6 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
struct i915_request *rq;
u32 *cs;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
/*
* Only wait if there is actually an old frame to release to
* guarantee forward progress.
@ -751,7 +749,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct i915_vma *vma;
int ret, tmp_width;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_release_old_vid(overlay);
@ -852,7 +849,6 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
struct drm_i915_private *dev_priv = overlay->i915;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
@ -1068,11 +1064,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
if (!(params->flags & I915_OVERLAY_ENABLE)) {
drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
ret = intel_overlay_switch_off(overlay);
mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
return ret;
@ -1088,7 +1080,6 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
if (i915_gem_object_is_tiled(new_bo)) {
DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
@ -1152,14 +1143,12 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
if (ret != 0)
goto out_unlock;
mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
i915_gem_object_put(new_bo);
return 0;
out_unlock:
mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
i915_gem_object_put(new_bo);
@ -1233,7 +1222,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
}
drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
ret = -EINVAL;
if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
@ -1290,7 +1278,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
ret = 0;
out_unlock:
mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
return ret;
@ -1303,15 +1290,11 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
struct i915_vma *vma;
int err;
mutex_lock(&i915->drm.struct_mutex);
obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
if (obj == NULL)
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_unlock;
}
if (IS_ERR(obj))
return PTR_ERR(obj);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma)) {
@ -1332,13 +1315,10 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
}
overlay->reg_bo = obj;
mutex_unlock(&i915->drm.struct_mutex);
return 0;
err_put_bo:
i915_gem_object_put(obj);
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@ -1367,8 +1347,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
overlay->contrast = 75;
overlay->saturation = 146;
i915_active_init(dev_priv,
&overlay->last_flip,
i915_active_init(&overlay->last_flip,
NULL, intel_overlay_last_flip_retire);
ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));

View File

@ -88,48 +88,35 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
}
}
static int edp_psr_shift(enum transcoder cpu_transcoder)
static void psr_irq_control(struct drm_i915_private *dev_priv)
{
switch (cpu_transcoder) {
case TRANSCODER_A:
return EDP_PSR_TRANSCODER_A_SHIFT;
case TRANSCODER_B:
return EDP_PSR_TRANSCODER_B_SHIFT;
case TRANSCODER_C:
return EDP_PSR_TRANSCODER_C_SHIFT;
default:
MISSING_CASE(cpu_transcoder);
/* fallthrough */
case TRANSCODER_EDP:
return EDP_PSR_TRANSCODER_EDP_SHIFT;
}
}
enum transcoder trans_shift;
u32 mask, val;
i915_reg_t imr_reg;
void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
{
u32 debug_mask, mask;
enum transcoder cpu_transcoder;
u32 transcoders = BIT(TRANSCODER_EDP);
if (INTEL_GEN(dev_priv) >= 8)
transcoders |= BIT(TRANSCODER_A) |
BIT(TRANSCODER_B) |
BIT(TRANSCODER_C);
debug_mask = 0;
mask = 0;
for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
int shift = edp_psr_shift(cpu_transcoder);
mask |= EDP_PSR_ERROR(shift);
debug_mask |= EDP_PSR_POST_EXIT(shift) |
EDP_PSR_PRE_ENTRY(shift);
/*
* gen12+ has one instance of the PSR registers per transcoder, all
* sharing the same bit definitions: treat it as TRANSCODER_EDP to
* force a zero shift in the bit definitions.
*/
if (INTEL_GEN(dev_priv) >= 12) {
trans_shift = 0;
imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
} else {
trans_shift = dev_priv->psr.transcoder;
imr_reg = EDP_PSR_IMR;
}
if (debug & I915_PSR_DEBUG_IRQ)
mask |= debug_mask;
mask = EDP_PSR_ERROR(trans_shift);
if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
mask |= EDP_PSR_POST_EXIT(trans_shift) |
EDP_PSR_PRE_ENTRY(trans_shift);
I915_WRITE(EDP_PSR_IMR, ~mask);
/* Warning: it is masking/setting reserved bits too */
val = I915_READ(imr_reg);
val &= ~EDP_PSR_TRANS_MASK(trans_shift);
val |= ~mask;
I915_WRITE(imr_reg, val);
}
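How trans_shift folds the two register layouts together, using made-up bit positions (the real EDP_PSR_* bit definitions live in i915_reg.h):

#include <stdio.h>

/* Illustrative layout: 3 status bits per transcoder, 8 bits apart. */
#define PSR_ERROR(shift)     (0x4u << ((shift) * 8))
#define PSR_POST_EXIT(shift) (0x2u << ((shift) * 8))
#define PSR_PRE_ENTRY(shift) (0x1u << ((shift) * 8))

int main(void)
{
	int gen = 9, transcoder = 2; /* pre-gen12: shift by transcoder */
	int shift = gen >= 12 ? 0 : transcoder;

	/* gen12+ has one IMR per transcoder, so every instance uses shift 0. */
	printf("mask = %#x\n",
	       PSR_ERROR(shift) | PSR_POST_EXIT(shift) | PSR_PRE_ENTRY(shift));
	return 0;
}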
static void psr_event_print(u32 val, bool psr2_enabled)
@ -171,60 +158,58 @@ static void psr_event_print(u32 val, bool psr2_enabled)
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
u32 transcoders = BIT(TRANSCODER_EDP);
enum transcoder cpu_transcoder;
enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
enum transcoder trans_shift;
i915_reg_t imr_reg;
ktime_t time_ns = ktime_get();
u32 mask = 0;
if (INTEL_GEN(dev_priv) >= 8)
transcoders |= BIT(TRANSCODER_A) |
BIT(TRANSCODER_B) |
BIT(TRANSCODER_C);
if (INTEL_GEN(dev_priv) >= 12) {
trans_shift = 0;
imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
} else {
trans_shift = dev_priv->psr.transcoder;
imr_reg = EDP_PSR_IMR;
}
for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
int shift = edp_psr_shift(cpu_transcoder);
if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
dev_priv->psr.last_entry_attempt = time_ns;
DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
transcoder_name(cpu_transcoder));
}
if (psr_iir & EDP_PSR_ERROR(shift)) {
DRM_WARN("[transcoder %s] PSR aux error\n",
transcoder_name(cpu_transcoder));
if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
dev_priv->psr.last_exit = time_ns;
DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
transcoder_name(cpu_transcoder));
dev_priv->psr.irq_aux_error = true;
if (INTEL_GEN(dev_priv) >= 9) {
u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
bool psr2_enabled = dev_priv->psr.psr2_enabled;
/*
* If this interrupt is not masked it will keep firing so fast
* that it prevents the scheduled work from running.
* Also, after a PSR error we don't want to arm PSR again, so we
* don't care about unmasking the interrupt or clearing
* irq_aux_error.
*/
mask |= EDP_PSR_ERROR(shift);
}
if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
dev_priv->psr.last_entry_attempt = time_ns;
DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
transcoder_name(cpu_transcoder));
}
if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
dev_priv->psr.last_exit = time_ns;
DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
transcoder_name(cpu_transcoder));
if (INTEL_GEN(dev_priv) >= 9) {
u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
bool psr2_enabled = dev_priv->psr.psr2_enabled;
I915_WRITE(PSR_EVENT(cpu_transcoder), val);
psr_event_print(val, psr2_enabled);
}
I915_WRITE(PSR_EVENT(cpu_transcoder), val);
psr_event_print(val, psr2_enabled);
}
}
if (mask) {
mask |= I915_READ(EDP_PSR_IMR);
I915_WRITE(EDP_PSR_IMR, mask);
if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
u32 val;
DRM_WARN("[transcoder %s] PSR aux error\n",
transcoder_name(cpu_transcoder));
dev_priv->psr.irq_aux_error = true;
/*
* If this interrupt is not masked it will keep firing so fast
* that it prevents the scheduled work from running.
* Also, after a PSR error we don't want to arm PSR again, so we
* don't care about unmasking the interrupt or clearing
* irq_aux_error.
*/
val = I915_READ(imr_reg);
val |= EDP_PSR_ERROR(trans_shift);
I915_WRITE(imr_reg, val);
schedule_work(&dev_priv->psr.work);
}
@ -283,6 +268,11 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv =
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
if (dev_priv->psr.dp) {
DRM_WARN("More than one eDP panel found, PSR support should be extended\n");
return;
}
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
@ -305,7 +295,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
dev_priv->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
WARN_ON(dev_priv->psr.dp);
dev_priv->psr.dp = intel_dp;
if (INTEL_GEN(dev_priv) >= 9 &&
@ -390,7 +379,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
@ -401,7 +390,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
/* Select only valid bits for SRD_AUX_CTL */
aux_ctl &= psr_aux_mask;
I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl);
}
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
@ -491,8 +480,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
I915_WRITE(EDP_PSR_CTL, val);
val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) &
EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
static void hsw_activate_psr2(struct intel_dp *intel_dp)
@ -528,9 +518,20 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
* recommending keep this bit unset while PSR2 is enabled.
*/
I915_WRITE(EDP_PSR_CTL, 0);
I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
I915_WRITE(EDP_PSR2_CTL, val);
I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}
static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
if (INTEL_GEN(dev_priv) < 9)
return false;
else if (INTEL_GEN(dev_priv) >= 12)
return trans == TRANSCODER_A;
else
return trans == TRANSCODER_EDP;
}
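A quick enumeration of what the helper above allows per generation (illustrative stand-alone version, transcoder names as strings):

#include <stdio.h>

static const char *psr2_transcoder(int gen)
{
	if (gen < 9)
		return "none";
	if (gen >= 12)
		return "TRANSCODER_A";
	return "TRANSCODER_EDP";
}

int main(void)
{
	for (int gen = 8; gen <= 12; gen++)
		printf("gen%d: %s\n", gen, psr2_transcoder(gen));
	return 0;
}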
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
@ -544,6 +545,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (!dev_priv->psr.sink_psr2_support)
return false;
if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n",
transcoder_name(crtc_state->cpu_transcoder));
return false;
}
/*
* DSC and PSR2 cannot be enabled simultaneously. If a requested
* resolution requires DSC to be enabled, priority is given to DSC
@ -554,7 +561,10 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
if (INTEL_GEN(dev_priv) >= 12) {
psr_max_h = 5120;
psr_max_v = 3200;
} else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
psr_max_h = 4096;
psr_max_v = 2304;
} else if (IS_GEN(dev_priv, 9)) {
@ -606,10 +616,9 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
/*
* HSW spec explicitly says PSR is tied to port A.
* BDW+ platforms with DDI implementation of PSR have different
* PSR registers per transcoder and we only implement transcoder EDP
* ones. Since by Display design transcoder EDP is tied to port A
* we can safely escape based on the port A.
* BDW+ platforms have an instance of the PSR registers per transcoder,
* but for now the driver only supports one instance of PSR, so let's
* keep it hardcoded to PORT_A
*/
if (dig_port->base.port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
@ -648,9 +657,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (INTEL_GEN(dev_priv) >= 9)
WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
@ -720,19 +730,44 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (INTEL_GEN(dev_priv) < 11)
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
I915_WRITE(EDP_PSR_DEBUG, mask);
I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask);
psr_irq_control(dev_priv);
}
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = dev_priv->psr.dp;
u32 val;
WARN_ON(dev_priv->psr.enabled);
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
/*
* If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
* will still keep the error set even after the reset done in the
* irq_preinstall and irq_uninstall hooks.
* Enabling PSR in this situation causes the screen to freeze the
* first time the PSR HW tries to activate, so let's keep PSR disabled
* to avoid any rendering problems.
*/
if (INTEL_GEN(dev_priv) >= 12) {
val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder));
val &= EDP_PSR_ERROR(0);
} else {
val = I915_READ(EDP_PSR_IIR);
val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
}
if (val) {
dev_priv->psr.sink_not_reliable = true;
DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n");
return;
}
DRM_DEBUG_KMS("Enabling PSR%s\n",
dev_priv->psr.psr2_enabled ? "2" : "1");
@ -782,20 +817,27 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
u32 val;
if (!dev_priv->psr.active) {
if (INTEL_GEN(dev_priv) >= 9)
WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
WARN_ON(val & EDP_PSR2_ENABLE);
}
val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
WARN_ON(val & EDP_PSR_ENABLE);
return;
}
if (dev_priv->psr.psr2_enabled) {
val = I915_READ(EDP_PSR2_CTL);
val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
WARN_ON(!(val & EDP_PSR2_ENABLE));
I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
val &= ~EDP_PSR2_ENABLE;
I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
} else {
val = I915_READ(EDP_PSR_CTL);
val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
WARN_ON(!(val & EDP_PSR_ENABLE));
I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
val &= ~EDP_PSR_ENABLE;
I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
dev_priv->psr.active = false;
}
@ -817,10 +859,10 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_psr_exit(dev_priv);
if (dev_priv->psr.psr2_enabled) {
psr_status = EDP_PSR2_STATUS;
psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
psr_status = EDP_PSR_STATUS;
psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
}
@ -963,7 +1005,8 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
* defensive enough to cover everything.
*/
return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS,
return __intel_wait_for_register(&dev_priv->uncore,
EDP_PSR_STATUS(dev_priv->psr.transcoder),
EDP_PSR_STATUS_STATE_MASK,
EDP_PSR_STATUS_STATE_IDLE, 2, 50,
out_value);
@ -979,10 +1022,10 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
return false;
if (dev_priv->psr.psr2_enabled) {
reg = EDP_PSR2_STATUS;
reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
reg = EDP_PSR_STATUS;
reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
mask = EDP_PSR_STATUS_STATE_MASK;
}
@ -1067,7 +1110,13 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
dev_priv->psr.debug = val;
intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
/*
* Do it right away if it's already enabled, otherwise it will be done
* when enabling the source.
*/
if (dev_priv->psr.enabled)
psr_irq_control(dev_priv);
mutex_unlock(&dev_priv->psr.lock);
@ -1208,42 +1257,30 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
*/
void intel_psr_init(struct drm_i915_private *dev_priv)
{
u32 val;
if (!HAS_PSR(dev_priv))
return;
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
if (!dev_priv->psr.sink_support)
return;
if (IS_HASWELL(dev_priv))
/*
* HSW doesn't have its PSR registers in transcoder space, so set
* this to a value that, when subtracted from a register in
* transcoder space, yields the right offset for HSW.
*/
dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;
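A worked example of that adjustment with illustrative addresses (not real BSpec offsets): once hsw_psr_mmio_adjust is subtracted, a transcoder-space lookup lands in HSW's fixed PSR block.

#include <stdio.h>

int main(void)
{
	/* Illustrative addresses only. */
	unsigned int srd_ctl_edp  = 0x6f860; /* transcoder-space SRD_CTL */
	unsigned int hsw_psr_base = 0x64800; /* HSW fixed PSR block */

	unsigned int adjust = srd_ctl_edp - hsw_psr_base;

	/* A transcoder-space register minus the adjust hits the HSW block. */
	printf("HSW SRD_CTL: %#x\n", srd_ctl_edp - adjust); /* 0x64800 */
	return 0;
}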
if (i915_modparams.enable_psr == -1)
if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
i915_modparams.enable_psr = 0;
/*
* If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
* will still keep the error set even after the reset done in the
* irq_preinstall and irq_uninstall hooks.
* Enabling PSR in this situation causes the screen to freeze the
* first time the PSR HW tries to activate, so let's keep PSR disabled
* to avoid any rendering problems.
*/
val = I915_READ(EDP_PSR_IIR);
val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
if (val) {
DRM_DEBUG_KMS("PSR interruption error set\n");
dev_priv->psr.sink_not_reliable = true;
}
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
dev_priv->psr.link_standby = false;
else
/* For new platforms let's respect VBT back again */
else if (INTEL_GEN(dev_priv) < 12)
/* For new platforms up to TGL let's respect VBT back again */
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
INIT_WORK(&dev_priv->psr.work, intel_psr_work);

View File

@ -30,7 +30,6 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
void intel_psr_init(struct drm_i915_private *dev_priv);
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,

View File

@ -2921,7 +2921,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
bytes[0], bytes[1]);
return false;
}
intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_sdvo->base.crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
return true;
}

View File

@ -14,6 +14,7 @@
struct drm_i915_private;
enum pipe;
enum port;
bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t sdvo_reg, enum pipe *pipe);

View File

@ -48,19 +48,6 @@
#include "intel_psr.h"
#include "intel_sprite.h"
bool is_planar_yuv_format(u32 pixelformat)
{
switch (pixelformat) {
case DRM_FORMAT_NV12:
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
return true;
default:
return false;
}
}
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
{
@ -361,6 +348,7 @@ skl_program_scaler(struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
int scaler_id = plane_state->scaler_id;
const struct intel_scaler *scaler =
@ -381,7 +369,7 @@ skl_program_scaler(struct intel_plane *plane,
0, INT_MAX);
/* TODO: handle sub-pixel coordinates */
if (is_planar_yuv_format(plane_state->base.fb->format->format) &&
if (drm_format_info_is_yuv_semiplanar(fb->format) &&
!icl_is_hdr_plane(dev_priv, plane->id)) {
y_hphase = skl_scaler_calc_phase(1, hscale, false);
y_vphase = skl_scaler_calc_phase(1, vscale, false);
@ -554,7 +542,7 @@ skl_program_plane(struct intel_plane *plane,
u32 y = plane_state->color_plane[color_plane].y;
u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
struct intel_plane *linked = plane_state->linked_plane;
struct intel_plane *linked = plane_state->planar_linked_plane;
const struct drm_framebuffer *fb = plane_state->base.fb;
u8 alpha = plane_state->base.alpha >> 8;
u32 plane_color_ctl = 0;
@ -653,7 +641,7 @@ skl_update_plane(struct intel_plane *plane,
{
int color_plane = 0;
if (plane_state->linked_plane) {
if (plane_state->planar_linked_plane) {
/* Program the UV plane */
color_plane = 1;
}
@ -1791,7 +1779,7 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s
int src_w = drm_rect_width(&plane_state->base.src) >> 16;
/* Display WA #1106 */
if (is_planar_yuv_format(fb->format->format) && src_w & 3 &&
if (drm_format_info_is_yuv_semiplanar(fb->format) && src_w & 3 &&
(rotation == DRM_MODE_ROTATE_270 ||
rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
@ -1818,7 +1806,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
/* use scaler when colorkey is not required */
if (!plane_state->ckey.flags && intel_fb_scalable(fb)) {
min_scale = 1;
max_scale = skl_max_scale(crtc_state, fb->format->format);
max_scale = skl_max_scale(crtc_state, fb->format);
}
ret = drm_atomic_helper_check_plane_state(&plane_state->base,
@ -2158,6 +2146,13 @@ static const u64 skl_plane_format_modifiers_ccs[] = {
DRM_FORMAT_MOD_INVALID
};
static const u64 gen12_plane_format_modifiers_noccs[] = {
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
@ -2306,6 +2301,55 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
}
}
static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
break;
default:
return false;
}
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
case DRM_FORMAT_XVYU2101010:
case DRM_FORMAT_C8:
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ABGR16161616F:
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_ARGB16161616F:
case DRM_FORMAT_Y210:
case DRM_FORMAT_Y212:
case DRM_FORMAT_Y216:
case DRM_FORMAT_XVYU12_16161616:
case DRM_FORMAT_XVYU16161616:
if (modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED ||
modifier == I915_FORMAT_MOD_Y_TILED)
return true;
/* fall through */
default:
return false;
}
}
static const struct drm_plane_funcs g4x_sprite_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@ -2342,6 +2386,15 @@ static const struct drm_plane_funcs skl_plane_funcs = {
.format_mod_supported = skl_plane_format_mod_supported,
};
static const struct drm_plane_funcs gen12_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = gen12_plane_format_mod_supported,
};
static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
@ -2430,6 +2483,7 @@ struct intel_plane *
skl_universal_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
static const struct drm_plane_funcs *plane_funcs;
struct intel_plane *plane;
enum drm_plane_type plane_type;
unsigned int supported_rotations;
@ -2472,11 +2526,19 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
formats = skl_get_plane_formats(dev_priv, pipe,
plane_id, &num_formats);
plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
if (plane->has_ccs)
modifiers = skl_plane_format_modifiers_ccs;
else
modifiers = skl_plane_format_modifiers_noccs;
if (INTEL_GEN(dev_priv) >= 12) {
/* TODO: Implement support for gen-12 CCS modifiers */
plane->has_ccs = false;
modifiers = gen12_plane_format_modifiers_noccs;
plane_funcs = &gen12_plane_funcs;
} else {
plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
if (plane->has_ccs)
modifiers = skl_plane_format_modifiers_ccs;
else
modifiers = skl_plane_format_modifiers_noccs;
plane_funcs = &skl_plane_funcs;
}
if (plane_id == PLANE_PRIMARY)
plane_type = DRM_PLANE_TYPE_PRIMARY;
@ -2486,7 +2548,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
possible_crtcs = BIT(pipe);
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
possible_crtcs, &skl_plane_funcs,
possible_crtcs, plane_funcs,
formats, num_formats, modifiers,
plane_type,
"plane %d%c", plane_id + 1,
@ -2519,6 +2581,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
drm_plane_create_zpos_immutable_property(&plane->base, plane_id);
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
return plane;
@ -2540,7 +2604,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
const u64 *modifiers;
const u32 *formats;
int num_formats;
int ret;
int ret, zpos;
if (INTEL_GEN(dev_priv) >= 9)
return skl_universal_plane_create(dev_priv, pipe,
@ -2630,6 +2694,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_LIMITED_RANGE);
zpos = sprite + 1;
drm_plane_create_zpos_immutable_property(&plane->base, zpos);
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
return plane;

View File

@ -17,7 +17,6 @@ struct drm_i915_private;
struct intel_crtc_state;
struct intel_plane_state;
bool is_planar_yuv_format(u32 pixelformat);
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,

View File

@ -23,32 +23,38 @@ static const char *tc_port_mode_name(enum tc_port_mode mode)
return names[mode];
}
static bool has_modular_fia(struct drm_i915_private *i915)
static void
tc_port_load_fia_params(struct drm_i915_private *i915,
struct intel_digital_port *dig_port)
{
if (!INTEL_INFO(i915)->display.has_modular_fia)
return false;
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(i915, port);
u32 modular_fia;
return intel_uncore_read(&i915->uncore,
PORT_TX_DFLEXDPSP(FIA1)) & MODULAR_FIA_MASK;
}
static enum phy_fia tc_port_to_fia(struct drm_i915_private *i915,
enum tc_port tc_port)
{
if (!has_modular_fia(i915))
return FIA1;
if (INTEL_INFO(i915)->display.has_modular_fia) {
modular_fia = intel_uncore_read(&i915->uncore,
PORT_TX_DFLEXDPSP(FIA1));
modular_fia &= MODULAR_FIA_MASK;
} else {
modular_fia = 0;
}
/*
* Each Modular FIA instance houses 2 TC ports. In SoCs that have more
* than two TC ports, there are multiple instances of Modular FIA.
*/
return tc_port / 2;
if (modular_fia) {
dig_port->tc_phy_fia = tc_port / 2;
dig_port->tc_phy_fia_idx = tc_port % 2;
} else {
dig_port->tc_phy_fia = FIA1;
dig_port->tc_phy_fia_idx = tc_port;
}
}
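A quick table of the mapping computed above, assuming a part with modular FIA and six TC ports:

#include <stdio.h>

int main(void)
{
	/* With modular FIA each instance serves two TC ports. */
	for (int tc_port = 0; tc_port < 6; tc_port++)
		printf("TC%d -> FIA%d, fia_idx %d\n",
		       tc_port, tc_port / 2, tc_port % 2);
	/* Without modular FIA: always FIA1, fia_idx == tc_port. */
	return 0;
}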
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 lane_mask;
@ -57,8 +63,23 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
WARN_ON(lane_mask == 0xffffffff);
return (lane_mask & DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
DP_LANE_ASSIGNMENT_SHIFT(tc_port);
lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_uncore *uncore = &i915->uncore;
u32 pin_mask;
pin_mask = intel_uncore_read(uncore,
PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
WARN_ON(pin_mask == 0xffffffff);
return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
@ -95,7 +116,6 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
struct intel_uncore *uncore = &i915->uncore;
u32 val;
@ -104,19 +124,21 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);
switch (required_lanes) {
case 1:
val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
DFLEXDPMLE1_DPMLETC_ML0(tc_port);
val |= lane_reversal ?
DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
break;
case 2:
val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
val |= lane_reversal ?
DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
break;
case 4:
val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
break;
default:
MISSING_CASE(required_lanes);
@ -164,9 +186,9 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
return mask;
}
if (val & TC_LIVE_STATE_TBT(tc_port))
if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
mask |= BIT(TC_PORT_TBT_ALT);
if (val & TC_LIVE_STATE_TC(tc_port))
if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
mask |= BIT(TC_PORT_DP_ALT);
if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port))
@ -182,7 +204,6 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
@ -194,14 +215,13 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
return false;
}
return val & DP_PHY_MODE_STATUS_COMPLETED(tc_port);
return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
}
static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
bool enable)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
@ -215,9 +235,9 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
return false;
}
val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
if (!enable)
val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
intel_uncore_write(uncore,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
@ -232,7 +252,6 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
@ -244,7 +263,7 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
return true;
}
return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port));
return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx));
}
/*
@ -540,5 +559,5 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
mutex_init(&dig_port->tc_lock);
dig_port->tc_legacy_port = is_legacy;
dig_port->tc_link_refcount = 0;
dig_port->tc_phy_fia = tc_port_to_fia(i915, tc_port);
tc_port_load_fia_params(i915, dig_port);
}

View File

@ -13,6 +13,7 @@ struct intel_digital_port;
bool intel_tc_port_connected(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port);
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes);

View File

@ -961,11 +961,10 @@ intel_tv_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
/* Ensure TV refresh is close to desired refresh */
if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
< 1000)
return MODE_OK;
if (abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) >= 1000)
return MODE_CLOCK_RANGE;
return MODE_CLOCK_RANGE;
return MODE_OK;
}
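The rewritten check in isolation: tv_mode->refresh is in millihertz, so a mode is accepted only when its vertical refresh is within 1 Hz of the TV mode's. A standalone sketch with an NTSC-style 59.94 Hz mode as the example:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int tv_refresh = 59940; /* millihertz */
	int mode_vrefresh = 60; /* Hz */

	if (abs(tv_refresh - mode_vrefresh * 1000) >= 1000)
		printf("MODE_CLOCK_RANGE\n");
	else
		printf("MODE_OK\n"); /* 60.000 Hz is within 1 Hz of 59.940 */
	return 0;
}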
static int
@ -1948,9 +1947,8 @@ intel_tv_init(struct drm_i915_private *dev_priv)
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = PORT_NONE;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B);
intel_encoder->cloneable = 0;
intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */

View File

@ -0,0 +1,160 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_vga.h"
static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915)
{
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
return VLV_VGACNTRL;
else if (INTEL_GEN(i915) >= 5)
return CPU_VGACNTRL;
else
return VGACNTRL;
}
/* Disable the VGA plane that we never use */
void intel_vga_disable(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
u8 sr1;
/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
outb(SR01, VGA_SR_INDEX);
sr1 = inb(VGA_SR_DATA);
outb(sr1 | 1 << 5, VGA_SR_DATA);
vga_put(pdev, VGA_RSRC_LEGACY_IO);
udelay(300);
I915_WRITE(vga_reg, VGA_DISP_DISABLE);
POSTING_READ(vga_reg);
}
void intel_vga_redisable_power_on(struct drm_i915_private *dev_priv)
{
i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
intel_vga_disable(dev_priv);
}
}
void intel_vga_redisable(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
/*
* This function can be called either from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
* structures are not yet restored. Since this function is at a very
* paranoid "someone might have enabled VGA while we were not looking"
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses.
*/
wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_VGA);
if (!wakeref)
return;
intel_vga_redisable_power_on(i915);
intel_display_power_put(i915, POWER_DOMAIN_VGA, wakeref);
}
void intel_vga_reset_io_mem(struct drm_i915_private *i915)
{
struct pci_dev *pdev = i915->drm.pdev;
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
* we'll get unclaimed register interrupts. This stops after we write
* anything to the VGA MSR register. The vgacon module uses this
* register all the time, so if we unbind our driver and, as a
* consequence, bind vgacon, we'll get stuck in an infinite loop at
* console_unlock(). So here we touch the VGA MSR register, making
* sure vgacon can keep working normally without triggering interrupts
* and error messages.
*/
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(pdev, VGA_RSRC_LEGACY_IO);
}
static int
intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
{
unsigned int reg = INTEL_GEN(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
if (pci_read_config_word(i915->bridge_dev, reg, &gmch_ctrl)) {
DRM_ERROR("failed to read control word\n");
return -EIO;
}
if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
return 0;
if (enable_decode)
gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
else
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
if (pci_write_config_word(i915->bridge_dev, reg, gmch_ctrl)) {
DRM_ERROR("failed to write control word\n");
return -EIO;
}
return 0;
}
static unsigned int
intel_vga_set_decode(void *cookie, bool enable_decode)
{
struct drm_i915_private *i915 = cookie;
intel_vga_set_state(i915, enable_decode);
if (enable_decode)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
else
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
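The config-space toggle above, reduced to its bit manipulation (the real INTEL_GMCH_VGA_DISABLE value comes from i915_reg.h; 0x2 here is only for the sketch):

#include <stdio.h>

#define GMCH_VGA_DISABLE 0x2u /* illustrative, not the real bit */

static unsigned short set_vga_decode(unsigned short gmch_ctrl, int enable)
{
	if (enable)
		gmch_ctrl &= ~GMCH_VGA_DISABLE; /* let the device decode VGA */
	else
		gmch_ctrl |= GMCH_VGA_DISABLE;
	return gmch_ctrl;
}

int main(void)
{
	printf("%#x\n", set_vga_decode(0x0, 0)); /* 0x2: decode disabled */
	return 0;
}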
int intel_vga_register(struct drm_i915_private *i915)
{
struct pci_dev *pdev = i915->drm.pdev;
int ret;
/*
* If we have > 1 VGA cards, then we need to arbitrate access to the
* common VGA resources.
*
* If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
* then we do not take part in VGA arbitration and the
* vga_client_register() fails with -ENODEV.
*/
ret = vga_client_register(pdev, i915, NULL, intel_vga_set_decode);
if (ret && ret != -ENODEV)
return ret;
return 0;
}
void intel_vga_unregister(struct drm_i915_private *i915)
{
struct pci_dev *pdev = i915->drm.pdev;
vga_client_register(pdev, NULL, NULL, NULL);
}

View File

@ -0,0 +1,18 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/
#ifndef __INTEL_VGA_H__
#define __INTEL_VGA_H__
struct drm_i915_private;
void intel_vga_reset_io_mem(struct drm_i915_private *i915);
void intel_vga_disable(struct drm_i915_private *i915);
void intel_vga_redisable(struct drm_i915_private *i915);
void intel_vga_redisable_power_on(struct drm_i915_private *i915);
int intel_vga_register(struct drm_i915_private *i915);
void intel_vga_unregister(struct drm_i915_private *i915);
#endif /* __INTEL_VGA_H__ */

View File

@ -749,7 +749,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
enum pipe pipe = intel_crtc->pipe;
enum port port;
u32 val;
bool glk_cold_boot = false;

View File

@ -155,7 +155,6 @@ static void clear_pages_dma_fence_cb(struct dma_fence *fence,
static void clear_pages_worker(struct work_struct *work)
{
struct clear_pages_work *w = container_of(work, typeof(*w), work);
struct drm_i915_private *i915 = w->ce->engine->i915;
struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
struct i915_vma *vma = w->sleeve->vma;
struct i915_request *rq;
@ -173,11 +172,9 @@ static void clear_pages_worker(struct work_struct *work)
obj->read_domains = I915_GEM_GPU_DOMAINS;
obj->write_domain = 0;
/* XXX: we need to kill this */
mutex_lock(&i915->drm.struct_mutex);
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (unlikely(err))
goto out_unlock;
goto out_signal;
batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
if (IS_ERR(batch)) {
@ -211,7 +208,7 @@ static void clear_pages_worker(struct work_struct *work)
* keep track of the GPU activity within this vma/request, and
* propagate the signal from the request to w->dma.
*/
err = i915_active_ref(&vma->active, rq->timeline, rq);
err = __i915_vma_move_to_active(vma, rq);
if (err)
goto out_request;
@ -229,8 +226,6 @@ out_batch:
intel_emit_vma_release(w->ce, batch);
out_unpin:
i915_vma_unpin(vma);
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
out_signal:
if (unlikely(err)) {
dma_fence_set_error(&w->dma, err);

View File

@ -167,97 +167,6 @@ lookup_user_engine(struct i915_gem_context *ctx,
return i915_gem_context_get_engine(ctx, idx);
}
static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
{
unsigned int max;
lockdep_assert_held(&i915->contexts.mutex);
if (INTEL_GEN(i915) >= 12)
max = GEN12_MAX_CONTEXT_HW_ID;
else if (INTEL_GEN(i915) >= 11)
max = GEN11_MAX_CONTEXT_HW_ID;
else if (USES_GUC_SUBMISSION(i915))
/*
* When using GuC in proxy submission, GuC consumes the
* highest bit in the context id to indicate proxy submission.
*/
max = MAX_GUC_CONTEXT_HW_ID;
else
max = MAX_CONTEXT_HW_ID;
return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
}
static int steal_hw_id(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx, *cn;
LIST_HEAD(pinned);
int id = -ENOSPC;
lockdep_assert_held(&i915->contexts.mutex);
list_for_each_entry_safe(ctx, cn,
&i915->contexts.hw_id_list, hw_id_link) {
if (atomic_read(&ctx->hw_id_pin_count)) {
list_move_tail(&ctx->hw_id_link, &pinned);
continue;
}
GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
list_del_init(&ctx->hw_id_link);
id = ctx->hw_id;
break;
}
/*
* Remember how far we got on the last repossession scan, so the
* list is kept in a "least recently scanned" order.
*/
list_splice_tail(&pinned, &i915->contexts.hw_id_list);
return id;
}
static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
{
int ret;
lockdep_assert_held(&i915->contexts.mutex);
/*
* We prefer to steal/stall ourselves and our users over that of the
* entire system. That may be a little unfair to our users, and
* even hurt high priority clients. The choice is whether to oomkill
* something else, or steal a context id.
*/
ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(ret < 0)) {
ret = steal_hw_id(i915);
if (ret < 0) /* once again for the correct errno code */
ret = new_hw_id(i915, GFP_KERNEL);
if (ret < 0)
return ret;
}
*out = ret;
return 0;
}
static void release_hw_id(struct i915_gem_context *ctx)
{
struct drm_i915_private *i915 = ctx->i915;
if (list_empty(&ctx->hw_id_link))
return;
mutex_lock(&i915->contexts.mutex);
if (!list_empty(&ctx->hw_id_link)) {
ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
list_del_init(&ctx->hw_id_link);
}
mutex_unlock(&i915->contexts.mutex);
}
static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
while (count--) {
@ -309,12 +218,11 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
static void i915_gem_context_free(struct i915_gem_context *ctx)
{
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
release_hw_id(ctx);
if (ctx->vm)
i915_vm_put(ctx->vm);
spin_lock(&ctx->i915->gem.contexts.lock);
list_del(&ctx->link);
spin_unlock(&ctx->i915->gem.contexts.lock);
free_engines(rcu_access_pointer(ctx->engines));
mutex_destroy(&ctx->engines_mutex);
@ -325,70 +233,55 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
kfree(ctx->name);
put_pid(ctx->pid);
list_del(&ctx->link);
mutex_destroy(&ctx->mutex);
kfree_rcu(ctx, rcu);
}
static void contexts_free(struct drm_i915_private *i915)
static void contexts_free_all(struct llist_node *list)
{
struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
struct i915_gem_context *ctx, *cn;
lockdep_assert_held(&i915->drm.struct_mutex);
llist_for_each_entry_safe(ctx, cn, freed, free_link)
llist_for_each_entry_safe(ctx, cn, list, free_link)
i915_gem_context_free(ctx);
}
static void contexts_free_first(struct drm_i915_private *i915)
static void contexts_flush_free(struct i915_gem_contexts *gc)
{
struct i915_gem_context *ctx;
struct llist_node *freed;
lockdep_assert_held(&i915->drm.struct_mutex);
freed = llist_del_first(&i915->contexts.free_list);
if (!freed)
return;
ctx = container_of(freed, typeof(*ctx), free_link);
i915_gem_context_free(ctx);
contexts_free_all(llist_del_all(&gc->free_list));
}
static void contexts_free_worker(struct work_struct *work)
{
struct drm_i915_private *i915 =
container_of(work, typeof(*i915), contexts.free_work);
struct i915_gem_contexts *gc =
container_of(work, typeof(*gc), free_work);
mutex_lock(&i915->drm.struct_mutex);
contexts_free(i915);
mutex_unlock(&i915->drm.struct_mutex);
contexts_flush_free(gc);
}
void i915_gem_context_release(struct kref *ref)
{
struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
struct drm_i915_private *i915 = ctx->i915;
struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
trace_i915_context_free(ctx);
if (llist_add(&ctx->free_link, &i915->contexts.free_list))
queue_work(i915->wq, &i915->contexts.free_work);
if (llist_add(&ctx->free_link, &gc->free_list))
schedule_work(&gc->free_work);
}
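The free-list hand-off relies on llist_add() returning true only when the list was previously empty, so the work item is scheduled exactly once per batch. A single-threaded stand-in (the kernel's llist is lock-free; this sketch is not):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node { struct node *next; };

static struct node *free_list; /* stand-in for the llist head */

static bool list_add(struct node *n)
{
	bool was_empty = free_list == NULL;

	n->next = free_list;
	free_list = n;
	return was_empty; /* only this caller kicks the worker */
}

int main(void)
{
	struct node a, b, c;

	printf("%d %d %d\n", list_add(&a), list_add(&b), list_add(&c)); /* 1 0 0 */
	return 0;
}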
static void context_close(struct i915_gem_context *ctx)
{
mutex_lock(&ctx->mutex);
struct i915_address_space *vm;
i915_gem_context_set_closed(ctx);
ctx->file_priv = ERR_PTR(-EBADF);
/*
* This context will never again be assigned to HW, so we can
* reuse its ID for the next context.
*/
release_hw_id(ctx);
mutex_lock(&ctx->mutex);
vm = i915_gem_context_vm(ctx);
if (vm)
i915_vm_close(vm);
ctx->file_priv = ERR_PTR(-EBADF);
/*
* The LUT uses the VMA as a backpointer to unref the object,
@ -414,7 +307,6 @@ __create_context(struct drm_i915_private *i915)
return ERR_PTR(-ENOMEM);
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &i915->contexts.list);
ctx->i915 = i915;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
mutex_init(&ctx->mutex);
@ -428,7 +320,6 @@ __create_context(struct drm_i915_private *i915)
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->hw_id_link);
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
@ -441,6 +332,10 @@ __create_context(struct drm_i915_private *i915)
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
spin_lock(&i915->gem.contexts.lock);
list_add_tail(&ctx->link, &i915->gem.contexts.list);
spin_unlock(&i915->gem.contexts.lock);
return ctx;
err_free:
@ -470,11 +365,11 @@ static void __apply_ppgtt(struct intel_context *ce, void *vm)
static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
struct i915_address_space *old = ctx->vm;
struct i915_address_space *old = i915_gem_context_vm(ctx);
GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
ctx->vm = i915_vm_get(vm);
rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
context_apply_all(ctx, __apply_ppgtt, vm);
return old;
@ -483,12 +378,12 @@ __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
static void __assign_ppgtt(struct i915_gem_context *ctx,
struct i915_address_space *vm)
{
if (vm == ctx->vm)
if (vm == rcu_access_pointer(ctx->vm))
return;
vm = __set_ppgtt(ctx, vm);
if (vm)
i915_vm_put(vm);
i915_vm_close(vm);
}
static void __set_timeline(struct intel_timeline **dst,
@ -515,27 +410,25 @@ static void __assign_timeline(struct i915_gem_context *ctx,
}
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
{
struct i915_gem_context *ctx;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
!HAS_EXECLISTS(dev_priv))
!HAS_EXECLISTS(i915))
return ERR_PTR(-EINVAL);
/* Reap the most stale context */
contexts_free_first(dev_priv);
/* Reap the stale contexts */
contexts_flush_free(&i915->gem.contexts);
ctx = __create_context(dev_priv);
ctx = __create_context(i915);
if (IS_ERR(ctx))
return ctx;
if (HAS_FULL_PPGTT(dev_priv)) {
if (HAS_FULL_PPGTT(i915)) {
struct i915_ppgtt *ppgtt;
ppgtt = i915_ppgtt_create(dev_priv);
ppgtt = i915_ppgtt_create(i915);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@ -543,14 +436,17 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
return ERR_CAST(ppgtt);
}
mutex_lock(&ctx->mutex);
__assign_ppgtt(ctx, &ppgtt->vm);
mutex_unlock(&ctx->mutex);
i915_vm_put(&ppgtt->vm);
}
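/*
 * __assign_ppgtt() takes its own reference to the vm (via i915_vm_open()
 * in __set_ppgtt()), so the creation reference from i915_ppgtt_create()
 * can be dropped here.
 */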
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
struct intel_timeline *timeline;
timeline = intel_timeline_create(&dev_priv->gt, NULL);
timeline = intel_timeline_create(&i915->gt, NULL);
if (IS_ERR(timeline)) {
context_close(ctx);
return ERR_CAST(timeline);
@ -582,18 +478,11 @@ struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
{
struct i915_gem_context *ctx;
int err;
ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx))
return ctx;
err = i915_gem_context_pin_hw_id(ctx);
if (err) {
destroy_kernel_context(&ctx);
return ERR_PTR(err);
}
i915_gem_context_clear_bannable(ctx);
ctx->sched.priority = I915_USER_PRIORITY(prio);
@ -602,62 +491,41 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
return ctx;
}
static void init_contexts(struct drm_i915_private *i915)
static void init_contexts(struct i915_gem_contexts *gc)
{
mutex_init(&i915->contexts.mutex);
INIT_LIST_HEAD(&i915->contexts.list);
spin_lock_init(&gc->lock);
INIT_LIST_HEAD(&gc->list);
/* Using the simple ida interface, the max is limited by sizeof(int) */
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
ida_init(&i915->contexts.hw_ida);
INIT_LIST_HEAD(&i915->contexts.hw_id_list);
INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
init_llist_head(&i915->contexts.free_list);
INIT_WORK(&gc->free_work, contexts_free_worker);
init_llist_head(&gc->free_list);
}
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
int i915_gem_init_contexts(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx;
/* Reassure ourselves we are only called once */
GEM_BUG_ON(dev_priv->kernel_context);
GEM_BUG_ON(i915->kernel_context);
init_contexts(dev_priv);
init_contexts(&i915->gem.contexts);
/* lowest priority; idle task */
ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context\n");
return PTR_ERR(ctx);
}
/*
* For easy recognisability, we want the kernel context to be 0 and then
* all user contexts will have non-zero hw_id. Kernel contexts are
* permanently pinned, so that we never suffer a stall and can
* use them from any allocation context (e.g. for evicting other
* contexts and from inside the shrinker).
*/
GEM_BUG_ON(ctx->hw_id);
GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
dev_priv->kernel_context = ctx;
i915->kernel_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
DRIVER_CAPS(dev_priv)->has_logical_contexts ?
DRIVER_CAPS(i915)->has_logical_contexts ?
"logical" : "fake");
return 0;
}
void i915_gem_contexts_fini(struct drm_i915_private *i915)
void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
{
lockdep_assert_held(&i915->drm.struct_mutex);
destroy_kernel_context(&i915->kernel_context);
/* Must free all deferred contexts (via flush_workqueue) first */
GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
ida_destroy(&i915->contexts.hw_ida);
}
static int context_idr_cleanup(int id, void *p, void *data)
@ -675,11 +543,16 @@ static int vm_idr_cleanup(int id, void *p, void *data)
static int gem_context_register(struct i915_gem_context *ctx,
struct drm_i915_file_private *fpriv)
{
struct i915_address_space *vm;
int ret;
ctx->file_priv = fpriv;
if (ctx->vm)
ctx->vm->file = fpriv;
mutex_lock(&ctx->mutex);
vm = i915_gem_context_vm(ctx);
if (vm)
WRITE_ONCE(vm->file, fpriv); /* XXX */
mutex_unlock(&ctx->mutex);
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
@ -716,9 +589,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
idr_init(&file_priv->context_idr);
idr_init_base(&file_priv->vm_idr, 1);
mutex_lock(&i915->drm.struct_mutex);
ctx = i915_gem_create_context(i915, 0);
mutex_unlock(&i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err;
@ -746,6 +617,7 @@ err:
void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_private *i915 = file_priv->dev_priv;
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
@ -754,6 +626,8 @@ void i915_gem_context_close(struct drm_file *file)
idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
idr_destroy(&file_priv->vm_idr);
mutex_destroy(&file_priv->vm_idr_lock);
contexts_flush_free(&i915->gem.contexts);
}
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
@ -846,6 +720,7 @@ struct context_barrier_task {
void *data;
};
__i915_active_call
static void cb_retire(struct i915_active *base)
{
struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
@ -865,20 +740,18 @@ static int context_barrier_task(struct i915_gem_context *ctx,
void (*task)(void *data),
void *data)
{
struct drm_i915_private *i915 = ctx->i915;
struct context_barrier_task *cb;
struct i915_gem_engines_iter it;
struct intel_context *ce;
int err = 0;
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!task);
cb = kmalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return -ENOMEM;
i915_active_init(i915, &cb->base, NULL, cb_retire);
i915_active_init(&cb->base, NULL, cb_retire);
err = i915_active_acquire(&cb->base);
if (err) {
kfree(cb);
@ -910,7 +783,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (emit)
err = emit(rq, data);
if (err == 0)
err = i915_active_ref(&cb->base, rq->timeline, rq);
err = i915_active_add_request(&cb->base, rq);
i915_request_add(rq);
if (err)
@ -933,16 +806,12 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
struct i915_address_space *vm;
int ret;
if (!ctx->vm)
if (!rcu_access_pointer(ctx->vm))
return -ENODEV;
/* XXX rcu acquire? */
ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
if (ret)
return ret;
rcu_read_lock();
vm = i915_vm_get(ctx->vm);
mutex_unlock(&ctx->i915->drm.struct_mutex);
rcu_read_unlock();
ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
if (ret)
@ -953,7 +822,7 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
if (ret < 0)
goto err_unlock;
i915_vm_get(vm);
i915_vm_open(vm);
args->size = 0;
args->value = ret;
@ -973,7 +842,7 @@ static void set_ppgtt_barrier(void *data)
if (INTEL_GEN(old->i915) < 8)
gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
i915_vm_put(old);
i915_vm_close(old);
}
static int emit_ppgtt_update(struct i915_request *rq, void *data)
@ -1003,12 +872,18 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
intel_ring_advance(rq, cs);
} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
int err;
/* Magic required to prevent forcewake errors! */
err = engine->emit_flush(rq, EMIT_INVALIDATE);
if (err)
return err;
cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
for (i = GEN8_3LVL_PDPES; i--; ) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
@ -1045,34 +920,34 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
if (args->size)
return -EINVAL;
if (!ctx->vm)
if (!rcu_access_pointer(ctx->vm))
return -ENODEV;
if (upper_32_bits(args->value))
return -ENOENT;
err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
if (err)
return err;
rcu_read_lock();
vm = idr_find(&file_priv->vm_idr, args->value);
if (vm)
i915_vm_get(vm);
mutex_unlock(&file_priv->vm_idr_lock);
if (vm && !kref_get_unless_zero(&vm->ref))
vm = NULL;
rcu_read_unlock();
if (!vm)
return -ENOENT;
err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
err = mutex_lock_interruptible(&ctx->mutex);
if (err)
goto out;
if (vm == ctx->vm)
if (i915_gem_context_is_closed(ctx)) {
err = -ENOENT;
goto out;
}
if (vm == rcu_access_pointer(ctx->vm))
goto unlock;
/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
mutex_lock(&ctx->mutex);
lut_close(ctx);
mutex_unlock(&ctx->mutex);
old = __set_ppgtt(ctx, vm);
@ -1087,13 +962,12 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
set_ppgtt_barrier,
old);
if (err) {
i915_vm_put(__set_ppgtt(ctx, old));
i915_vm_put(old);
i915_vm_close(__set_ppgtt(ctx, old));
i915_vm_close(old);
}
unlock:
mutex_unlock(&ctx->i915->drm.struct_mutex);
mutex_unlock(&ctx->mutex);
out:
i915_vm_put(vm);
return err;
@ -1112,7 +986,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
offset = i915_ggtt_offset(ce->state) +
LRC_STATE_PN * PAGE_SIZE +
(CTX_R_PWR_CLK_STATE + 1) * 4;
CTX_R_PWR_CLK_STATE * 4;
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = lower_32_bits(offset);
@ -1155,8 +1029,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
}
static int
__intel_context_reconfigure_sseu(struct intel_context *ce,
struct intel_sseu sseu)
intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
int ret;
@ -1179,23 +1052,6 @@ unlock:
return ret;
}
static int
intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
struct drm_i915_private *i915 = ce->engine->i915;
int ret;
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
ret = __intel_context_reconfigure_sseu(ce, sseu);
mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
static int
user_to_context_sseu(struct drm_i915_private *i915,
const struct drm_i915_gem_context_param_sseu *user,
@ -1967,10 +1823,11 @@ static int clone_vm(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
struct i915_address_space *vm;
int err = 0;
rcu_read_lock();
do {
vm = READ_ONCE(src->vm);
vm = rcu_dereference(src->vm);
if (!vm)
break;
@ -1992,7 +1849,7 @@ static int clone_vm(struct i915_gem_context *dst,
* it cannot be reallocated elsewhere.
*/
if (vm == READ_ONCE(src->vm))
if (vm == rcu_access_pointer(src->vm))
break;
i915_vm_put(vm);
@ -2000,11 +1857,16 @@ static int clone_vm(struct i915_gem_context *dst,
rcu_read_unlock();
if (vm) {
__assign_ppgtt(dst, vm);
if (!mutex_lock_interruptible(&dst->mutex)) {
__assign_ppgtt(dst, vm);
mutex_unlock(&dst->mutex);
} else {
err = -EINTR;
}
i915_vm_put(vm);
}
return 0;
return err;
}
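/*
 * The loop above is the usual RCU lookup dance: sample src->vm under
 * rcu_read_lock(), try to acquire a reference against a concurrent final
 * put, then re-check that src->vm still points at the same vm; if it
 * changed mid-copy, drop the reference and retry.
 */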
static int create_clone(struct i915_user_extension __user *ext, void *data)
@ -2094,12 +1956,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
return -EIO;
}
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
ext_data.ctx = i915_gem_create_context(i915, args->flags);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ext_data.ctx))
return PTR_ERR(ext_data.ctx);
@ -2226,12 +2083,12 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_GTT_SIZE:
args->size = 0;
if (ctx->vm)
args->value = ctx->vm->total;
else if (to_i915(dev)->ggtt.alias)
args->value = to_i915(dev)->ggtt.alias->vm.total;
rcu_read_lock();
if (rcu_access_pointer(ctx->vm))
args->value = rcu_dereference(ctx->vm)->total;
else
args->value = to_i915(dev)->ggtt.vm.total;
rcu_read_unlock();
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
@ -2297,7 +2154,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_reset_stats *args = data;
struct i915_gem_context *ctx;
int ret;
@ -2319,7 +2176,7 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
*/
if (capable(CAP_SYS_ADMIN))
args->reset_count = i915_reset_count(&dev_priv->gpu_error);
args->reset_count = i915_reset_count(&i915->gpu_error);
else
args->reset_count = 0;
@ -2332,33 +2189,6 @@ out:
return ret;
}
int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
{
struct drm_i915_private *i915 = ctx->i915;
int err = 0;
mutex_lock(&i915->contexts.mutex);
GEM_BUG_ON(i915_gem_context_is_closed(ctx));
if (list_empty(&ctx->hw_id_link)) {
GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
err = assign_hw_id(i915, &ctx->hw_id);
if (err)
goto out_unlock;
list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
}
GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
atomic_inc(&ctx->hw_id_pin_count);
out_unlock:
mutex_unlock(&i915->contexts.mutex);
return err;
}
/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)


@ -11,7 +11,9 @@
#include "gt/intel_context.h"
#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"
@ -112,29 +114,14 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}
int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx);
static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
{
if (atomic_inc_not_zero(&ctx->hw_id_pin_count))
return 0;
return __i915_gem_context_pin_hw_id(ctx);
}
static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
{
GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == 0u);
atomic_dec(&ctx->hw_id_pin_count);
}
static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
{
return !ctx->file_priv;
}
/* i915_gem_context.c */
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file);
@ -173,6 +160,27 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_release);
}
static inline struct i915_address_space *
i915_gem_context_vm(struct i915_gem_context *ctx)
{
return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
}
static inline struct i915_address_space *
i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
{
struct i915_address_space *vm;
rcu_read_lock();
vm = rcu_dereference(ctx->vm);
if (!vm)
vm = &ctx->i915->ggtt.vm;
vm = i915_vm_get(vm);
rcu_read_unlock();
return vm;
}
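/*
 * A minimal usage sketch (hypothetical caller; assumes i915_vm_put()
 * releases the reference taken by i915_gem_context_get_vm_rcu()):
 *
 *	static u64 ctx_vm_total(struct i915_gem_context *ctx)
 *	{
 *		struct i915_address_space *vm;
 *		u64 total;
 *
 *		vm = i915_gem_context_get_vm_rcu(ctx);
 *		total = vm->total;
 *		i915_vm_put(vm);
 *
 *		return total;
 *	}
 */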
static inline struct i915_gem_engines *
i915_gem_context_engines(struct i915_gem_context *ctx)
{


@ -88,7 +88,7 @@ struct i915_gem_context {
* In other modes, this is a NULL pointer with the expectation that
* the caller uses the shared global GTT.
*/
struct i915_address_space *vm;
struct i915_address_space __rcu *vm;
/**
* @pid: process id of creator
@ -147,24 +147,6 @@ struct i915_gem_context {
#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
#define CONTEXT_USER_ENGINES 3
/**
* @hw_id: - unique identifier for the context
*
* The hardware needs to uniquely identify the context for a few
* functions like fault reporting, PASID, scheduling. The
* &drm_i915_private.context_hw_ida is used to assign a unique
* id for the lifetime of the context.
*
* @hw_id_pin_count: - number of times this context has been pinned
* for use (should be, at most, once per engine).
*
* @hw_id_link: - all contexts with an assigned id are tracked
* for possible repossession.
*/
unsigned int hw_id;
atomic_t hw_id_pin_count;
struct list_head hw_id_link;
struct mutex mutex;
struct i915_sched_attr sched;


@ -27,7 +27,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
{
if (!READ_ONCE(obj->pin_global))
if (!i915_gem_object_is_framebuffer(obj))
return;
i915_gem_object_lock(obj);
@ -288,14 +288,21 @@ restart:
if (!drm_mm_node_allocated(&vma->node))
continue;
ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
/* Wait for an earlier async bind, need to rewrite it */
ret = i915_vma_sync(vma);
if (ret)
return ret;
ret = i915_vma_bind(vma, cache_level, PIN_UPDATE, NULL);
if (ret)
return ret;
}
}
list_for_each_entry(vma, &obj->vma.list, obj_link)
vma->node.color = cache_level;
list_for_each_entry(vma, &obj->vma.list, obj_link) {
if (i915_vm_has_cache_coloring(vma->vm))
vma->node.color = cache_level;
}
i915_gem_object_set_cache_coherency(obj, cache_level);
obj->cache_dirty = true; /* Always invalidate stale cachelines */
@ -389,16 +396,11 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
if (ret)
goto out;
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
goto out;
ret = i915_gem_object_lock_interruptible(obj);
if (ret == 0) {
ret = i915_gem_object_set_cache_level(obj, level);
i915_gem_object_unlock(obj);
}
mutex_unlock(&i915->drm.struct_mutex);
out:
i915_gem_object_put(obj);
@ -422,12 +424,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
assert_object_held(obj);
/* Mark the global pin early so that we account for the
* display coherency whilst setting up the cache domains.
*/
obj->pin_global++;
/* The display engine is not coherent with the LLC cache on gen6. As
/*
* The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
* done with uncached PTEs. This is lowest common denominator for all
* chipsets.
@ -439,12 +437,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
ret = i915_gem_object_set_cache_level(obj,
HAS_WT(to_i915(obj->base.dev)) ?
I915_CACHE_WT : I915_CACHE_NONE);
if (ret) {
vma = ERR_PTR(ret);
goto err_unpin_global;
}
if (ret)
return ERR_PTR(ret);
/* As the user may map the buffer once pinned in the display plane
/*
* As the user may map the buffer once pinned in the display plane
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers. However,
* it may simply be too big to fit into mappable, in which case
@ -461,22 +458,19 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
if (IS_ERR(vma))
goto err_unpin_global;
return vma;
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
__i915_gem_object_flush_for_display(obj);
/* It should now be out of any other write domains, and we can update
/*
* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
obj->read_domains |= I915_GEM_DOMAIN_GTT;
return vma;
err_unpin_global:
obj->pin_global--;
return vma;
}
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
@ -491,6 +485,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
if (!drm_mm_node_allocated(&vma->node))
continue;
GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
}
mutex_unlock(&i915->ggtt.vm.mutex);
@ -500,7 +495,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
spin_lock_irqsave(&i915->mm.obj_lock, flags);
if (obj->mm.madv == I915_MADV_WILLNEED)
if (obj->mm.madv == I915_MADV_WILLNEED &&
!atomic_read(&obj->mm.shrink_pin))
list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
@ -514,12 +510,6 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
assert_object_held(obj);
if (WARN_ON(obj->pin_global == 0))
return;
if (--obj->pin_global == 0)
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
/* Bump the LRU to try and avoid premature eviction whilst flipping */
i915_gem_object_bump_inactive_ggtt(obj);


@ -252,6 +252,7 @@ struct i915_execbuffer {
bool has_fence : 1;
bool needs_unfenced : 1;
struct intel_context *ce;
struct i915_request *rq;
u32 *rq_cmd;
unsigned int rq_size;
@ -697,7 +698,9 @@ static int eb_reserve(struct i915_execbuffer *eb)
case 1:
/* Too fragmented, unbind everything and retry */
mutex_lock(&eb->context->vm->mutex);
err = i915_gem_evict_vm(eb->context->vm);
mutex_unlock(&eb->context->vm->mutex);
if (err)
return err;
break;
@ -725,7 +728,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT;
eb->gem_context = ctx;
if (ctx->vm)
if (rcu_access_pointer(ctx->vm))
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
eb->context_flags = 0;
@ -880,6 +883,9 @@ static void eb_destroy(const struct i915_execbuffer *eb)
{
GEM_BUG_ON(eb->reloc_cache.rq);
if (eb->reloc_cache.ce)
intel_context_put(eb->reloc_cache.ce);
if (eb->lut_size > 0)
kfree(eb->buckets);
}
@ -903,6 +909,7 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->has_fence = cache->gen < 4;
cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
cache->node.allocated = false;
cache->ce = NULL;
cache->rq = NULL;
cache->rq_size = 0;
}
@ -963,11 +970,13 @@ static void reloc_cache_reset(struct reloc_cache *cache)
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __iomem *)vaddr);
if (cache->node.allocated) {
if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.clear_range(&ggtt->vm,
cache->node.start,
cache->node.size);
mutex_lock(&ggtt->vm.mutex);
drm_mm_remove_node(&cache->node);
mutex_unlock(&ggtt->vm.mutex);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
}
@ -1042,11 +1051,13 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
PIN_NOEVICT);
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
mutex_lock(&ggtt->vm.mutex);
err = drm_mm_insert_node_in_range
(&ggtt->vm.mm, &cache->node,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
mutex_unlock(&ggtt->vm.mutex);
if (err) /* no inactive aperture space, use cpu reloc */
return NULL;
} else {
@ -1056,7 +1067,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
}
offset = cache->node.start;
if (cache->node.allocated) {
if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0);
@ -1145,7 +1156,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
u32 *cmd;
int err;
pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE);
pool = intel_engine_get_pool(eb->engine, PAGE_SIZE);
if (IS_ERR(pool))
return PTR_ERR(pool);
@ -1168,7 +1179,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_unmap;
rq = i915_request_create(eb->context);
rq = intel_context_create_request(cache->ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@ -1239,6 +1250,29 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
if (!intel_engine_can_store_dword(eb->engine))
return ERR_PTR(-ENODEV);
if (!cache->ce) {
struct intel_context *ce;
/*
* The CS pre-parser can pre-fetch commands across
* memory sync points and, starting with gen12, it is able to
* pre-fetch across BB_START and BB_END boundaries
* (within the same context). We therefore use a
* separate context on gen12+ to guarantee that the reloc
* writes land before the parser gets to the target
* memory location.
*/
if (cache->gen >= 12)
ce = intel_context_create(eb->context->gem_context,
eb->engine);
else
ce = intel_context_get(eb->context);
if (IS_ERR(ce))
return ERR_CAST(ce);
cache->ce = ce;
}
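/*
 * Concretely: with a shared context on gen12+, the pre-parser could
 * fetch the BB_START target before the MI_STORE_DWORD_IMM patching it
 * has landed; the dedicated context above forces the parser to resample
 * memory at the context switch.
 */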
err = __reloc_gpu_alloc(eb, vma, len);
if (unlikely(err))
return ERR_PTR(err);
@ -1388,7 +1422,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
IS_GEN(eb->i915, 6)) {
err = i915_vma_bind(target, target->obj->cache_level,
PIN_GLOBAL);
PIN_GLOBAL, NULL);
if (WARN_ONCE(err,
"Unexpected failure to bind target VMA!"))
return err;
@ -1961,7 +1995,7 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
struct i915_vma *vma;
int err;
pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
pool = intel_engine_get_pool(eb->engine, eb->batch_len);
if (IS_ERR(pool))
return ERR_CAST(pool);
@ -2112,35 +2146,6 @@ static struct i915_request *eb_throttle(struct intel_context *ce)
return i915_request_get(rq);
}
static int
__eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
{
int err;
if (likely(atomic_inc_not_zero(&ce->pin_count)))
return 0;
err = mutex_lock_interruptible(&eb->i915->drm.struct_mutex);
if (err)
return err;
err = __intel_context_do_pin(ce);
mutex_unlock(&eb->i915->drm.struct_mutex);
return err;
}
static void
__eb_unpin_context(struct i915_execbuffer *eb, struct intel_context *ce)
{
if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
return;
mutex_lock(&eb->i915->drm.struct_mutex);
intel_context_unpin(ce);
mutex_unlock(&eb->i915->drm.struct_mutex);
}
static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
{
struct intel_timeline *tl;
@ -2160,7 +2165,7 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
err = __eb_pin_context(eb, ce);
err = intel_context_pin(ce);
if (err)
return err;
@ -2204,7 +2209,7 @@ err_exit:
intel_context_exit(ce);
intel_context_timeline_unlock(tl);
err_unpin:
__eb_unpin_context(eb, ce);
intel_context_unpin(ce);
return err;
}
@ -2217,7 +2222,7 @@ static void eb_unpin_engine(struct i915_execbuffer *eb)
intel_context_exit(ce);
mutex_unlock(&tl->mutex);
__eb_unpin_context(eb, ce);
intel_context_unpin(ce);
}
static unsigned int


@ -8,6 +8,7 @@
#include <linux/sizes.h>
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_gem_gtt.h"
@ -245,21 +246,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
wakeref = intel_runtime_pm_get(rpm);
srcu = intel_gt_reset_trylock(ggtt->vm.gt);
if (srcu < 0) {
ret = srcu;
goto err_rpm;
}
ret = i915_mutex_lock_interruptible(dev);
ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
if (ret)
goto err_reset;
/* Access to snoopable pages through the GTT is incoherent. */
if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
ret = -EFAULT;
goto err_unlock;
}
goto err_rpm;
/* Now pin it into the GTT as needed */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@ -287,10 +276,19 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
view.type = I915_GGTT_VIEW_PARTIAL;
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
}
/* The entire mappable GGTT is pinned? Unexpected! */
GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
}
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unlock;
goto err_reset;
}
/* Access to snoopable pages through the GTT is incoherent. */
if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
ret = -EFAULT;
goto err_unpin;
}
ret = i915_vma_pin_fence(vma);
@ -318,14 +316,16 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
i915_vma_set_ggtt_write(vma);
if (write) {
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
i915_vma_set_ggtt_write(vma);
obj->mm.dirty = true;
}
err_fence:
i915_vma_unpin_fence(vma);
err_unpin:
__i915_vma_unpin(vma);
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_reset:
intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_rpm:
@ -333,23 +333,20 @@ err_rpm:
i915_gem_object_unpin_pages(obj);
err:
switch (ret) {
case -EIO:
/*
* We eat errors when the gpu is terminally wedged to avoid
* userspace unduly crashing (gl has no provisions for mmaps to
* fail). But any other -EIO isn't ours (e.g. swap in failure)
* and so needs to be reported.
*/
if (!intel_gt_is_wedged(ggtt->vm.gt))
return VM_FAULT_SIGBUS;
/* else, fall through */
case -EAGAIN:
/*
* EAGAIN means the gpu is hung and we'll wait for the error
* handler to reset everything when re-faulting in
* i915_mutex_lock_interruptible.
*/
default:
WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
/* fallthrough */
case -EIO: /* shmemfs failure from swap device */
case -EFAULT: /* purged object */
case -ENODEV: /* bad object, how did you get here! */
return VM_FAULT_SIGBUS;
case -ENOSPC: /* shmemfs allocation failure */
case -ENOMEM: /* our allocation failure */
return VM_FAULT_OOM;
case 0:
case -EAGAIN:
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:
@ -358,14 +355,6 @@ err:
* already did the job.
*/
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
case -ENOSPC:
case -EFAULT:
return VM_FAULT_SIGBUS;
default:
WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
return VM_FAULT_SIGBUS;
}
}
@ -436,6 +425,7 @@ out:
static int create_mmap_offset(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_gt *gt = &i915->gt;
int err;
err = drm_gem_create_mmap_offset(&obj->base);
@ -443,21 +433,12 @@ static int create_mmap_offset(struct drm_i915_gem_object *obj)
return 0;
/* Attempt to reap some mmap space from dead objects */
do {
err = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
if (err)
break;
err = intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT);
if (err)
return err;
i915_gem_drain_freed_objects(i915);
err = drm_gem_create_mmap_offset(&obj->base);
if (!err)
break;
} while (flush_delayed_work(&i915->gem.retire_work));
return err;
i915_gem_drain_freed_objects(i915);
return drm_gem_create_mmap_offset(&obj->base);
}
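/*
 * The retry is now a single pass: retire outstanding requests so that
 * dead objects lose their last references, drain the freed-object list
 * to release their mmap offsets, then try the allocation once more,
 * instead of looping on the old dedicated retire worker.
 */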
int
@ -473,10 +454,16 @@ i915_gem_mmap_gtt(struct drm_file *file,
if (!obj)
return -ENOENT;
if (i915_gem_object_never_bind_ggtt(obj)) {
ret = -ENODEV;
goto out;
}
ret = create_mmap_offset(obj);
if (ret == 0)
*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
out:
i915_gem_object_put(obj);
return ret;
}


@ -155,21 +155,30 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
llist_for_each_entry_safe(obj, on, freed, freed) {
struct i915_vma *vma, *vn;
trace_i915_gem_object_destroy(obj);
mutex_lock(&i915->drm.struct_mutex);
if (!list_empty(&obj->vma.list)) {
struct i915_vma *vma;
list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
GEM_BUG_ON(i915_vma_is_active(vma));
vma->flags &= ~I915_VMA_PIN_MASK;
i915_vma_destroy(vma);
/*
* Note that the vma keeps an object reference while
* it is active, so it *should* not sleep while we
* destroy it. Our debug code insists it *might*.
* For the moment, play along.
*/
spin_lock(&obj->vma.lock);
while ((vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
obj_link))) {
GEM_BUG_ON(vma->obj != obj);
spin_unlock(&obj->vma.lock);
i915_vma_destroy(vma);
spin_lock(&obj->vma.lock);
}
spin_unlock(&obj->vma.lock);
}
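/*
 * The lock juggling above is deliberate: vma.lock cannot be held across
 * i915_vma_destroy() (which may sleep), so each iteration re-acquires
 * the spinlock and re-reads the list head instead of using
 * list_for_each_entry_safe().
 */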
GEM_BUG_ON(!list_empty(&obj->vma.list));
GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
mutex_unlock(&i915->drm.struct_mutex);
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);


@ -106,6 +106,11 @@ static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
dma_resv_lock(obj->base.resv, NULL);
}
static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
return dma_resv_trylock(obj->base.resv);
}
static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
@ -134,28 +139,41 @@ i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
return obj->base.vma_node.readonly;
}
static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
unsigned long flags)
{
return obj->ops->flags & flags;
}
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
}
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}
static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}
static inline bool
i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
{
return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT);
}
static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
}
static inline bool
@ -406,7 +424,8 @@ static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
return true;
return obj->pin_global; /* currently in use by HW, keep flushed */
/* Currently in use by HW (display engine)? Keep flushed. */
return i915_gem_object_is_framebuffer(obj);
}
static inline void __start_cpu_write(struct drm_i915_gem_object *obj)


@ -32,7 +32,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
count = div_u64(vma->size, block_size);
size = (1 + 8 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
pool = intel_engine_pool_get(&ce->engine->pool, size);
pool = intel_engine_get_pool(ce->engine, size);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
@ -216,7 +216,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
count = div_u64(dst->size, block_size);
size = (1 + 11 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
pool = intel_engine_pool_get(&ce->engine->pool, size);
pool = intel_engine_get_pool(ce->engine, size);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;


@ -8,6 +8,7 @@
#define __I915_GEM_OBJECT_TYPES_H__
#include <drm/drm_gem.h>
#include <uapi/drm/i915_drm.h>
#include "i915_active.h"
#include "i915_selftest.h"
@ -32,7 +33,8 @@ struct drm_i915_gem_object_ops {
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
#define I915_GEM_OBJECT_IS_PROXY BIT(2)
#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
#define I915_GEM_OBJECT_NO_GGTT BIT(3)
#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(4)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@ -152,17 +154,15 @@ struct drm_i915_gem_object {
/** Count of VMA actually bound by this object */
atomic_t bind_count;
/** Count of how many global VMA are currently pinned for use by HW */
unsigned int pin_global;
struct {
struct mutex lock; /* protects the pages and their use */
atomic_t pages_pin_count;
atomic_t shrink_pin;
struct sg_table *pages;
void *mapping;
/* TODO: whack some of this into the error state */
struct i915_page_sizes {
/**
* The sg mask of the pages sg_table. i.e. the mask of


@ -71,6 +71,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
list = &i915->mm.shrink_list;
list_add_tail(&obj->mm.link, list);
atomic_set(&obj->mm.shrink_pin, 0);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
}


@ -7,79 +7,9 @@
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_globals.h"
static void call_idle_barriers(struct intel_engine_cs *engine)
{
struct llist_node *node, *next;
llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
struct i915_active_request *active =
container_of((struct list_head *)node,
typeof(*active), link);
INIT_LIST_HEAD(&active->link);
RCU_INIT_POINTER(active->request, NULL);
active->retire(active, NULL);
}
}
static void i915_gem_park(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
lockdep_assert_held(&i915->drm.struct_mutex);
for_each_engine(engine, i915, id)
call_idle_barriers(engine); /* cleanup after wedging */
i915_vma_parked(i915);
i915_globals_park();
}
static void idle_work_handler(struct work_struct *work)
{
struct drm_i915_private *i915 =
container_of(work, typeof(*i915), gem.idle_work);
bool park;
cancel_delayed_work_sync(&i915->gem.retire_work);
mutex_lock(&i915->drm.struct_mutex);
intel_wakeref_lock(&i915->gt.wakeref);
park = (!intel_wakeref_is_active(&i915->gt.wakeref) &&
!work_pending(work));
intel_wakeref_unlock(&i915->gt.wakeref);
if (park)
i915_gem_park(i915);
else
queue_delayed_work(i915->wq,
&i915->gem.retire_work,
round_jiffies_up_relative(HZ));
mutex_unlock(&i915->drm.struct_mutex);
}
static void retire_work_handler(struct work_struct *work)
{
struct drm_i915_private *i915 =
container_of(work, typeof(*i915), gem.retire_work.work);
/* Come back later if the device is busy... */
if (mutex_trylock(&i915->drm.struct_mutex)) {
i915_retire_requests(i915);
mutex_unlock(&i915->drm.struct_mutex);
}
queue_delayed_work(i915->wq,
&i915->gem.retire_work,
round_jiffies_up_relative(HZ));
}
static int pm_notifier(struct notifier_block *nb,
unsigned long action,
@ -90,14 +20,10 @@ static int pm_notifier(struct notifier_block *nb,
switch (action) {
case INTEL_GT_UNPARK:
i915_globals_unpark();
queue_delayed_work(i915->wq,
&i915->gem.retire_work,
round_jiffies_up_relative(HZ));
break;
case INTEL_GT_PARK:
queue_work(i915->wq, &i915->gem.idle_work);
i915_vma_parked(i915);
break;
}
@ -108,26 +34,21 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
{
bool result = !intel_gt_is_wedged(gt);
do {
if (i915_gem_wait_for_idle(gt->i915,
I915_WAIT_LOCKED |
I915_WAIT_FOR_IDLE_BOOST,
I915_GEM_IDLE_TIMEOUT) == -ETIME) {
/* XXX hide warning from gem_eio */
if (i915_modparams.reset) {
dev_err(gt->i915->drm.dev,
"Failed to idle engines, declaring wedged!\n");
GEM_TRACE_DUMP();
}
/*
* Forcibly cancel outstanding work and leave
* the gpu quiet.
*/
intel_gt_set_wedged(gt);
result = false;
if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
/* XXX hide warning from gem_eio */
if (i915_modparams.reset) {
dev_err(gt->i915->drm.dev,
"Failed to idle engines, declaring wedged!\n");
GEM_TRACE_DUMP();
}
} while (i915_retire_requests(gt->i915) && result);
/*
* Forcibly cancel outstanding work and leave
* the gpu quiet.
*/
intel_gt_set_wedged(gt);
result = false;
}
if (intel_gt_pm_wait_for_idle(gt))
result = false;
@ -140,6 +61,24 @@ bool i915_gem_load_power_context(struct drm_i915_private *i915)
return switch_to_kernel_context_sync(&i915->gt);
}
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
int count = atomic_read(&gt->user_wakeref);
/* Inside suspend/resume so single threaded, no races to worry about. */
if (likely(!count))
return;
intel_gt_pm_get(gt);
if (suspend) {
GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
atomic_sub(count, &gt->wakeref.count);
} else {
atomic_add(count, &gt->wakeref.count);
}
intel_gt_pm_put(gt);
}
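/*
 * i.e. wakerefs that userspace holds via debugfs are parked out of
 * gt->wakeref.count across suspend and added back on resume; the
 * surrounding intel_gt_pm_get()/put() pair keeps the GT awake while the
 * count is rewritten so the adjustment itself cannot reach zero early.
 */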
void i915_gem_suspend(struct drm_i915_private *i915)
{
GEM_TRACE("\n");
@ -147,7 +86,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
flush_workqueue(i915->wq);
mutex_lock(&i915->drm.struct_mutex);
user_forcewake(&i915->gt, true);
/*
* We have to flush all the executing contexts to main memory so
@ -158,15 +97,12 @@ void i915_gem_suspend(struct drm_i915_private *i915)
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
switch_to_kernel_context_sync(&i915->gt);
mutex_unlock(&i915->drm.struct_mutex);
intel_gt_suspend(&i915->gt);
intel_uc_suspend(&i915->gt.uc);
cancel_delayed_work_sync(&i915->gt.hangcheck.work);
i915_gem_drain_freed_objects(i915);
intel_uc_suspend(&i915->gt.uc);
}
static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
@ -238,13 +174,9 @@ void i915_gem_resume(struct drm_i915_private *i915)
{
GEM_TRACE("\n");
mutex_lock(&i915->drm.struct_mutex);
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
i915_gem_restore_gtt_mappings(i915);
i915_gem_restore_fences(i915);
if (i915_gem_init_hw(i915))
if (intel_gt_init_hw(&i915->gt))
goto err_wedged;
/*
@ -261,9 +193,10 @@ void i915_gem_resume(struct drm_i915_private *i915)
if (!i915_gem_load_power_context(i915))
goto err_wedged;
user_forcewake(&i915->gt, false);
out_unlock:
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
mutex_unlock(&i915->drm.struct_mutex);
return;
err_wedged:
@ -277,9 +210,6 @@ err_wedged:
void i915_gem_init__pm(struct drm_i915_private *i915)
{
INIT_WORK(&i915->gem.idle_work, idle_work_handler);
INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);
i915->gem.pm_notifier.notifier_call = pm_notifier;
blocking_notifier_chain_register(&i915->gt.pm_notifications,
&i915->gem.pm_notifier);


@ -16,40 +16,6 @@
#include "i915_trace.h"
static bool shrinker_lock(struct drm_i915_private *i915,
unsigned int flags,
bool *unlock)
{
struct mutex *m = &i915->drm.struct_mutex;
switch (mutex_trylock_recursive(m)) {
case MUTEX_TRYLOCK_RECURSIVE:
*unlock = false;
return true;
case MUTEX_TRYLOCK_FAILED:
*unlock = false;
if (flags & I915_SHRINK_ACTIVE &&
mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
*unlock = true;
return *unlock;
case MUTEX_TRYLOCK_SUCCESS:
*unlock = true;
return true;
}
BUG();
}
static void shrinker_unlock(struct drm_i915_private *i915, bool unlock)
{
if (!unlock)
return;
mutex_unlock(&i915->drm.struct_mutex);
}
static bool swap_available(void)
{
return get_nr_swap_pages() > 0;
@ -61,7 +27,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
if (!i915_gem_object_is_shrinkable(obj))
return false;
/* Only report true if by unbinding the object and putting its pages
/*
* Only report true if by unbinding the object and putting its pages
* we can actually make forward progress towards freeing physical
* pages.
*
@ -72,16 +39,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
return false;
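/*
 * i.e. every bound VMA pins the pages once, so any pins beyond
 * bind_count belong to users (vmap, CPU access): unbinding every VMA
 * would still leave the pages pinned and free nothing.
 */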
/* If any vma are "permanently" pinned, it will prevent us from
* reclaiming the obj->mm.pages. We only allow scanout objects to claim
* a permanent pin, along with a few others like the context objects.
* To simplify the scan, and to avoid walking the list of vma under the
* object, we just check its count of permanent pins.
*/
if (READ_ONCE(obj->pin_global))
return false;
/* We can only return physical pages to the system if we can either
/*
* We can only return physical pages to the system if we can either
* discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap.
*/
@ -162,10 +121,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
intel_wakeref_t wakeref = 0;
unsigned long count = 0;
unsigned long scanned = 0;
bool unlock;
if (!shrinker_lock(i915, shrink, &unlock))
return 0;
/*
* When shrinking the active list, we should also consider active
@ -275,8 +230,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
if (shrink & I915_SHRINK_BOUND)
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
shrinker_unlock(i915, unlock);
if (nr_scanned)
*nr_scanned += scanned;
return count;
@ -346,19 +299,14 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_i915_private *i915 =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
unsigned long freed;
bool unlock;
sc->nr_scanned = 0;
if (!shrinker_lock(i915, 0, &unlock))
return SHRINK_STOP;
freed = i915_gem_shrink(i915,
sc->nr_to_scan,
&sc->nr_scanned,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_WRITEBACK);
I915_SHRINK_UNBOUND);
if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
intel_wakeref_t wakeref;
@ -373,8 +321,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
}
}
shrinker_unlock(i915, unlock);
return sc->nr_scanned ? freed : SHRINK_STOP;
}
@ -391,6 +337,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
freed_pages = 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_WRITEBACK);
@ -426,10 +373,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
struct i915_vma *vma, *next;
unsigned long freed_pages = 0;
intel_wakeref_t wakeref;
bool unlock;
if (!shrinker_lock(i915, 0, &unlock))
return NOTIFY_DONE;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
@ -446,15 +389,11 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
if (!vma->iomap || i915_vma_is_active(vma))
continue;
mutex_unlock(&i915->ggtt.vm.mutex);
if (i915_vma_unbind(vma) == 0)
if (__i915_vma_unbind(vma) == 0)
freed_pages += count;
mutex_lock(&i915->ggtt.vm.mutex);
}
mutex_unlock(&i915->ggtt.vm.mutex);
shrinker_unlock(i915, unlock);
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
}
@ -497,22 +436,9 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
fs_reclaim_acquire(GFP_KERNEL);
/*
* As we invariably rely on the struct_mutex within the shrinker,
* but have a complicated recursion dance, taint all the mutexes used
* within the shrinker with the struct_mutex. For completeness, we
* taint with all subclasses of struct_mutex, even though we should
* only need tainting by I915_MM_NORMAL to catch possible ABBA
* deadlocks from using struct_mutex inside @mutex.
*/
mutex_acquire(&i915->drm.struct_mutex.dep_map,
I915_MM_SHRINKER, 0, _RET_IP_);
mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
mutex_release(&mutex->dep_map, 0, _RET_IP_);
mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
fs_reclaim_release(GFP_KERNEL);
if (unlock)
@ -523,46 +449,52 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = obj_to_i915(obj);
unsigned long flags;
/*
* We can only be called while the pages are pinned or when
* the pages are released. If pinned, we should only be called
* from a single caller under controlled conditions; and on release
* only one caller may release us. The two paths may never cross.
*/
if (!list_empty(&obj->mm.link)) { /* pinned by caller */
struct drm_i915_private *i915 = obj_to_i915(obj);
unsigned long flags;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
GEM_BUG_ON(list_empty(&obj->mm.link));
if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
return;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
!list_empty(&obj->mm.link)) {
list_del_init(&obj->mm.link);
i915->mm.shrink_count--;
i915->mm.shrink_memory -= obj->base.size;
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
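/*
 * shrink_pin is a split counter: atomic_add_unless(..., 1, 0) above is
 * the uncontended fast path for an already-pinned object, while the
 * first pin falls back to mm.obj_lock to take the object off the shrink
 * lists; __i915_gem_object_make_shrinkable() below undoes it with the
 * matching decrement.
 */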
static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
struct list_head *head)
{
struct drm_i915_private *i915 = obj_to_i915(obj);
unsigned long flags;
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
GEM_BUG_ON(!list_empty(&obj->mm.link));
if (!i915_gem_object_is_shrinkable(obj))
return;
if (i915_gem_object_is_shrinkable(obj)) {
struct drm_i915_private *i915 = obj_to_i915(obj);
unsigned long flags;
if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
return;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
GEM_BUG_ON(!kref_read(&obj->base.refcount));
spin_lock_irqsave(&i915->mm.obj_lock, flags);
GEM_BUG_ON(!kref_read(&obj->base.refcount));
if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
GEM_BUG_ON(!list_empty(&obj->mm.link));
list_add_tail(&obj->mm.link, head);
i915->mm.shrink_count++;
i915->mm.shrink_memory += obj->base.size;
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)


@ -425,8 +425,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
bdw_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
break;
case 11:
default:
MISSING_CASE(INTEL_GEN(dev_priv));
/* fall-through */
case 11:
case 12:
icl_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
break;
@ -550,10 +553,11 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
{
struct drm_i915_gem_object *obj;
unsigned int cache_level;
int err = -ENOMEM;
obj = i915_gem_object_alloc();
if (obj == NULL)
return NULL;
if (!obj)
goto err;
drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
@ -563,14 +567,16 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
if (i915_gem_object_pin_pages(obj))
err = i915_gem_object_pin_pages(obj);
if (err)
goto cleanup;
return obj;
cleanup:
i915_gem_object_free(obj);
return NULL;
err:
return ERR_PTR(err);
}
struct drm_i915_gem_object *
@ -582,28 +588,32 @@ i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
return ERR_PTR(-ENODEV);
if (size == 0)
return NULL;
return ERR_PTR(-EINVAL);
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
return NULL;
return ERR_PTR(-ENOMEM);
ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
if (ret) {
kfree(stolen);
return NULL;
obj = ERR_PTR(ret);
goto err_free;
}
obj = _i915_gem_object_create_stolen(dev_priv, stolen);
if (obj)
return obj;
if (IS_ERR(obj))
goto err_remove;
return obj;
err_remove:
i915_gem_stolen_remove_node(dev_priv, stolen);
err_free:
kfree(stolen);
return NULL;
return obj;
}
struct drm_i915_gem_object *
@ -619,9 +629,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
return ERR_PTR(-ENODEV);
DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
&stolen_offset, &gtt_offset, &size);
@ -630,11 +638,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (WARN_ON(size == 0) ||
WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
return NULL;
return ERR_PTR(-EINVAL);
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
return NULL;
return ERR_PTR(-ENOMEM);
stolen->start = stolen_offset;
stolen->size = size;
@ -644,15 +652,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (ret) {
DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
kfree(stolen);
return NULL;
return ERR_PTR(ret);
}
obj = _i915_gem_object_create_stolen(dev_priv, stolen);
if (obj == NULL) {
if (IS_ERR(obj)) {
DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
return NULL;
return obj;
}
/* Some objects just need physical mem from stolen space */
@ -674,22 +682,26 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
* setting up the GTT space. The actual reservation will occur
* later.
*/
mutex_lock(&ggtt->vm.mutex);
ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
size, gtt_offset, obj->cache_level,
0);
if (ret) {
DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
mutex_unlock(&ggtt->vm.mutex);
goto err_pages;
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->pages);
vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND;
atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
__i915_vma_set_map_and_fenceable(vma);
mutex_lock(&ggtt->vm.mutex);
list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
list_add_tail(&vma->vm_link, &ggtt->vm.bound_list);
mutex_unlock(&ggtt->vm.mutex);
GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
@ -701,5 +713,5 @@ err_pages:
i915_gem_object_unpin_pages(obj);
err:
i915_gem_object_put(obj);
return NULL;
return ERR_PTR(ret);
}


@ -50,10 +50,8 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
if (target) {
if (target && xchg(&target->file_priv, NULL))
list_del(&target->client_link);
target->file_priv = NULL;
}
target = request;
}
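/*
 * The xchg() claims the request atomically: whichever path swaps
 * file_priv to NULL first owns the unlink from the client list, so
 * list_del() can never run twice on the same link.
 */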


@ -181,22 +181,25 @@ static int
i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
int tiling_mode, unsigned int stride)
{
struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
struct i915_vma *vma;
int ret;
int ret = 0;
if (tiling_mode == I915_TILING_NONE)
return 0;
mutex_lock(&ggtt->vm.mutex);
for_each_ggtt_vma(vma, obj) {
if (i915_vma_fence_prepare(vma, tiling_mode, stride))
continue;
ret = i915_vma_unbind(vma);
ret = __i915_vma_unbind(vma);
if (ret)
return ret;
break;
}
mutex_unlock(&ggtt->vm.mutex);
return 0;
return ret;
}
int
@ -212,7 +215,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride));
GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE));
lockdep_assert_held(&i915->drm.struct_mutex);
if ((tiling | stride) == obj->tiling_and_stride)
return 0;
@ -233,16 +235,18 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
* whilst executing a fenced command for an untiled object.
*/
err = i915_gem_object_fence_prepare(obj, tiling, stride);
if (err)
return err;
i915_gem_object_lock(obj);
if (i915_gem_object_is_framebuffer(obj)) {
i915_gem_object_unlock(obj);
return -EBUSY;
}
err = i915_gem_object_fence_prepare(obj, tiling, stride);
if (err) {
i915_gem_object_unlock(obj);
return err;
}
/* If the memory has unknown (i.e. varying) swizzling, we pin the
* pages to prevent them being swapped out and causing corruption
* due to the change in swizzling.
@ -313,10 +317,14 @@ int
i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_set_tiling *args = data;
struct drm_i915_gem_object *obj;
int err;
if (!dev_priv->ggtt.num_fences)
return -EOPNOTSUPP;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
@ -364,12 +372,7 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
}
}
err = mutex_lock_interruptible(&dev->struct_mutex);
if (err)
goto err;
err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride);
mutex_unlock(&dev->struct_mutex);
/* We have to maintain this existing ABI... */
args->stride = i915_gem_object_get_stride(obj);
@ -402,6 +405,9 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int err = -ENOENT;
if (!dev_priv->ggtt.num_fences)
return -EOPNOTSUPP;
rcu_read_lock();
obj = i915_gem_object_lookup_rcu(file, args->handle);
if (obj) {


@ -92,7 +92,6 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
struct i915_mmu_notifier *mn =
container_of(_mn, struct i915_mmu_notifier, mn);
struct interval_tree_node *it;
struct mutex *unlock = NULL;
unsigned long end;
int ret = 0;
@ -129,33 +128,13 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
}
spin_unlock(&mn->lock);
if (!unlock) {
unlock = &mn->mm->i915->drm.struct_mutex;
switch (mutex_trylock_recursive(unlock)) {
default:
case MUTEX_TRYLOCK_FAILED:
if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
i915_gem_object_put(obj);
return -EINTR;
}
/* fall through */
case MUTEX_TRYLOCK_SUCCESS:
break;
case MUTEX_TRYLOCK_RECURSIVE:
unlock = ERR_PTR(-EEXIST);
break;
}
}
ret = i915_gem_object_unbind(obj,
I915_GEM_OBJECT_UNBIND_ACTIVE);
if (ret == 0)
ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
i915_gem_object_put(obj);
if (ret)
goto unlock;
return ret;
spin_lock(&mn->lock);
@ -168,10 +147,6 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
}
spin_unlock(&mn->lock);
unlock:
if (!IS_ERR_OR_NULL(unlock))
mutex_unlock(unlock);
return ret;
}
@ -702,6 +677,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE |
I915_GEM_OBJECT_NO_GGTT |
I915_GEM_OBJECT_ASYNC_CANCEL,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
@ -782,7 +758,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
* On almost all of the older hw, we cannot tell the GPU that
* a page is readonly.
*/
vm = dev_priv->kernel_context->vm;
vm = rcu_dereference_protected(dev_priv->kernel_context->vm,
true); /* static vm */
if (!vm || !vm->has_read_only)
return -ENODEV;
}
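ctx->vm is now an RCU-managed pointer. The kernel context's vm never changes after initialisation, so the lockdep condition of rcu_dereference_protected() can be satisfied with a literal true rather than a lock assertion; a transient reader would use the usual rcu_read_lock() idiom instead. A hedged sketch of both forms:

	struct i915_address_space *vm;

	/* Sketch: transient reader of an RCU-managed vm pointer */
	rcu_read_lock();
	vm = rcu_dereference(ctx->vm);
	/* ... use vm without sleeping ... */
	rcu_read_unlock();

	/* Sketch: pointer fixed at init time, no locking required */
	vm = rcu_dereference_protected(ctx->vm, true); /* static vm */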


@ -333,7 +333,12 @@ static int igt_check_page_sizes(struct i915_vma *vma)
struct drm_i915_private *i915 = vma->vm->i915;
unsigned int supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj = vma->obj;
int err = 0;
int err;
/* We have to wait for the async bind to complete before our asserts */
err = i915_vma_sync(vma);
if (err)
return err;
if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
@ -879,9 +884,8 @@ out_object_put:
return err;
}
static int gpu_write(struct i915_vma *vma,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
static int gpu_write(struct intel_context *ce,
struct i915_vma *vma,
u32 dw,
u32 val)
{
@ -893,7 +897,7 @@ static int gpu_write(struct i915_vma *vma,
if (err)
return err;
return igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32),
return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
vma->size >> PAGE_SHIFT, val);
}
@ -929,18 +933,16 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
return err;
}
static int __igt_write_huge(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
static int __igt_write_huge(struct intel_context *ce,
struct drm_i915_gem_object *obj,
u64 size, u64 offset,
u32 dword, u32 val)
{
struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct i915_vma *vma;
int err;
vma = i915_vma_instance(obj, vm, NULL);
vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@ -954,7 +956,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
* The ggtt may have some pages reserved so
* refrain from erroring out.
*/
if (err == -ENOSPC && i915_is_ggtt(vm))
if (err == -ENOSPC && i915_is_ggtt(ce->vm))
err = 0;
goto out_vma_close;
@ -964,7 +966,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
if (err)
goto out_vma_unpin;
err = gpu_write(vma, ctx, engine, dword, val);
err = gpu_write(ce, vma, dword, val);
if (err) {
pr_err("gpu-write failed at offset=%llx\n", offset);
goto out_vma_unpin;
@ -987,14 +989,13 @@ out_vma_close:
static int igt_write_huge(struct i915_gem_context *ctx,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
static struct intel_engine_cs *engines[I915_NUM_ENGINES];
struct intel_engine_cs *engine;
struct i915_gem_engines *engines;
struct i915_gem_engines_iter it;
struct intel_context *ce;
I915_RND_STATE(prng);
IGT_TIMEOUT(end_time);
unsigned int max_page_size;
unsigned int id;
unsigned int count;
u64 max;
u64 num;
u64 size;
@ -1008,19 +1009,18 @@ static int igt_write_huge(struct i915_gem_context *ctx,
if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
size = round_up(size, I915_GTT_PAGE_SIZE_2M);
max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
max = div_u64((vm->total - size), max_page_size);
n = 0;
for_each_engine(engine, i915, id) {
if (!intel_engine_can_store_dword(engine)) {
pr_info("store-dword-imm not supported on engine=%u\n",
id);
count = 0;
max = U64_MAX;
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
count++;
if (!intel_engine_can_store_dword(ce->engine))
continue;
}
engines[n++] = engine;
}
max = min(max, ce->vm->total);
n++;
}
i915_gem_context_unlock_engines(ctx);
if (!n)
return 0;
@ -1029,23 +1029,30 @@ static int igt_write_huge(struct i915_gem_context *ctx,
* randomized order, let's also make feeding to the same engine a few
* times in succession a possibility by enlarging the permutation array.
*/
order = i915_random_order(n * I915_NUM_ENGINES, &prng);
order = i915_random_order(count * count, &prng);
if (!order)
return -ENOMEM;
max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
max = div_u64(max - size, max_page_size);
/*
* Try various offsets in an ascending/descending fashion until we
* time out -- we want to avoid issues hidden by effectively always using
* offset = 0.
*/
i = 0;
engines = i915_gem_context_lock_engines(ctx);
for_each_prime_number_from(num, 0, max) {
u64 offset_low = num * max_page_size;
u64 offset_high = (max - num) * max_page_size;
u32 dword = offset_in_page(num) / 4;
struct intel_context *ce;
engine = engines[order[i] % n];
i = (i + 1) % (n * I915_NUM_ENGINES);
ce = engines->engines[order[i] % engines->num_engines];
i = (i + 1) % (count * count);
if (!ce || !intel_engine_can_store_dword(ce->engine))
continue;
/*
* In order to utilize 64K pages we need to both pad the vma
@ -1057,22 +1064,23 @@ static int igt_write_huge(struct i915_gem_context *ctx,
offset_low = round_down(offset_low,
I915_GTT_PAGE_SIZE_2M);
err = __igt_write_huge(ctx, engine, obj, size, offset_low,
err = __igt_write_huge(ce, obj, size, offset_low,
dword, num + 1);
if (err)
break;
err = __igt_write_huge(ctx, engine, obj, size, offset_high,
err = __igt_write_huge(ce, obj, size, offset_high,
dword, num + 1);
if (err)
break;
if (igt_timeout(end_time,
"%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
__func__, engine->id, offset_low, offset_high,
"%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
__func__, ce->engine->name, offset_low, offset_high,
max_page_size))
break;
}
i915_gem_context_unlock_engines(ctx);
kfree(order);
@ -1314,15 +1322,15 @@ static int igt_ppgtt_pin_update(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *dev_priv = ctx->i915;
unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
struct i915_address_space *vm = ctx->vm;
struct drm_i915_gem_object *obj;
struct i915_gem_engines_iter it;
struct i915_address_space *vm;
struct intel_context *ce;
struct i915_vma *vma;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int n;
int first, last;
int err;
int err = 0;
/*
* Make sure there's no funny business when doing a PIN_UPDATE -- in the
@ -1332,9 +1340,10 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
if (!vm || !i915_vm_is_4lvl(vm)) {
vm = i915_gem_context_get_vm_rcu(ctx);
if (!i915_vm_is_4lvl(vm)) {
pr_info("48b PPGTT not supported, skipping\n");
return 0;
goto out_vm;
}
first = ilog2(I915_GTT_PAGE_SIZE_64K);
@ -1387,7 +1396,7 @@ static int igt_ppgtt_pin_update(void *arg)
goto out_unpin;
}
err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE, NULL);
if (err)
goto out_unpin;
@ -1419,14 +1428,18 @@ static int igt_ppgtt_pin_update(void *arg)
*/
n = 0;
for_each_engine(engine, dev_priv, id) {
if (!intel_engine_can_store_dword(engine))
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
if (!intel_engine_can_store_dword(ce->engine))
continue;
err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
err = gpu_write(ce, vma, n++, 0xdeadbeaf);
if (err)
goto out_unpin;
break;
}
i915_gem_context_unlock_engines(ctx);
if (err)
goto out_unpin;
while (n--) {
err = cpu_check(obj, n, 0xdeadbeaf);
if (err)
@ -1439,6 +1452,8 @@ out_close:
i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
out_vm:
i915_vm_put(vm);
return err;
}
@ -1448,7 +1463,7 @@ static int igt_tmpfs_fallback(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct vfsmount *gemfs = i915->mm.gemfs;
struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *vaddr;
@ -1498,6 +1513,7 @@ out_put:
out_restore:
i915->mm.gemfs = gemfs;
i915_vm_put(vm);
return err;
}
@ -1505,14 +1521,14 @@ static int igt_shrink_thp(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
struct drm_i915_gem_object *obj;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct i915_gem_engines_iter it;
struct intel_context *ce;
struct i915_vma *vma;
unsigned int flags = PIN_USER;
unsigned int n;
int err;
int err = 0;
/*
* Sanity check shrinking huge-paged object -- make sure nothing blows
@ -1521,12 +1537,14 @@ static int igt_shrink_thp(void *arg)
if (!igt_can_allocate_thp(i915)) {
pr_info("missing THP support, skipping\n");
return 0;
goto out_vm;
}
obj = i915_gem_object_create_shmem(i915, SZ_2M);
if (IS_ERR(obj))
return PTR_ERR(obj);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out_vm;
}
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
@ -1548,16 +1566,19 @@ static int igt_shrink_thp(void *arg)
goto out_unpin;
n = 0;
for_each_engine(engine, i915, id) {
if (!intel_engine_can_store_dword(engine))
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
if (!intel_engine_can_store_dword(ce->engine))
continue;
err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
err = gpu_write(ce, vma, n++, 0xdeadbeaf);
if (err)
goto out_unpin;
break;
}
i915_gem_context_unlock_engines(ctx);
i915_vma_unpin(vma);
if (err)
goto out_close;
/*
* Now that the pages are *unpinned* shrink-all should invoke
@ -1583,16 +1604,17 @@ static int igt_shrink_thp(void *arg)
while (n--) {
err = cpu_check(obj, n, 0xdeadbeaf);
if (err)
goto out_unpin;
break;
}
out_unpin:
i915_vma_unpin(vma);
out_close:
i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
out_vm:
i915_vm_put(vm);
return err;
}
@ -1617,7 +1639,6 @@ int i915_gem_huge_page_mock_selftests(void)
mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
mkwrite_device_info(dev_priv)->ppgtt_size = 48;
mutex_lock(&dev_priv->drm.struct_mutex);
ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
@ -1643,9 +1664,7 @@ out_close:
i915_vm_put(&ppgtt->vm);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
drm_dev_put(&dev_priv->drm);
return err;
}
@ -1661,7 +1680,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
};
struct drm_file *file;
struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
struct i915_address_space *vm;
int err;
if (!HAS_PPGTT(i915)) {
@ -1676,25 +1695,21 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
if (IS_ERR(file))
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_unlock;
goto out_file;
}
if (ctx->vm)
ctx->vm->scrub_64K = true;
mutex_lock(&ctx->mutex);
vm = i915_gem_context_vm(ctx);
if (vm)
WRITE_ONCE(vm->scrub_64K, true);
mutex_unlock(&ctx->mutex);
err = i915_subtests(tests, ctx);
out_unlock:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
out_file:
mock_file_free(i915, file);
return err;
}


@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
@ -78,7 +79,7 @@ static int gtt_set(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma;
u32 __iomem *map;
int err;
int err = 0;
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true);
@ -90,15 +91,21 @@ static int gtt_set(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(map))
return PTR_ERR(map);
if (IS_ERR(map)) {
err = PTR_ERR(map);
goto out_rpm;
}
iowrite32(v, &map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
return 0;
out_rpm:
intel_gt_pm_put(vma->vm->gt);
return err;
}
static int gtt_get(struct drm_i915_gem_object *obj,
@ -107,7 +114,7 @@ static int gtt_get(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma;
u32 __iomem *map;
int err;
int err = 0;
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
@ -119,15 +126,21 @@ static int gtt_get(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(map))
return PTR_ERR(map);
if (IS_ERR(map)) {
err = PTR_ERR(map);
goto out_rpm;
}
*v = ioread32(&map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
return 0;
out_rpm:
intel_gt_pm_put(vma->vm->gt);
return err;
}
static int wc_set(struct drm_i915_gem_object *obj,
@ -280,7 +293,6 @@ static int igt_gem_coherency(void *arg)
struct drm_i915_private *i915 = arg;
const struct igt_coherency_mode *read, *write, *over;
struct drm_i915_gem_object *obj;
intel_wakeref_t wakeref;
unsigned long count, n;
u32 *offsets, *values;
int err = 0;
@ -299,8 +311,6 @@ static int igt_gem_coherency(void *arg)
values = offsets + ncachelines;
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
continue;
@ -326,7 +336,7 @@ static int igt_gem_coherency(void *arg)
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto unlock;
goto free;
}
i915_random_reorder(offsets, ncachelines, &prng);
@ -377,15 +387,13 @@ static int igt_gem_coherency(void *arg)
}
}
}
unlock:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
free:
kfree(offsets);
return err;
put_object:
i915_gem_object_put(obj);
goto unlock;
goto free;
}
int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)


@ -8,6 +8,7 @@
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"
@ -52,19 +53,17 @@ static int live_nop_switch(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
err = -ENOMEM;
goto out_unlock;
goto out_file;
}
for (n = 0; n < nctx; n++) {
ctx[n] = live_context(i915, file);
if (IS_ERR(ctx[n])) {
err = PTR_ERR(ctx[n]);
goto out_unlock;
goto out_file;
}
}
@ -78,7 +77,7 @@ static int live_nop_switch(void *arg)
rq = igt_request_alloc(ctx[n], engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_unlock;
goto out_file;
}
i915_request_add(rq);
}
@ -86,7 +85,7 @@ static int live_nop_switch(void *arg)
pr_err("Failed to populated %d contexts\n", nctx);
intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto out_unlock;
goto out_file;
}
times[1] = ktime_get_raw();
@ -96,7 +95,7 @@ static int live_nop_switch(void *arg)
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
goto out_unlock;
goto out_file;
end_time = jiffies + i915_selftest.timeout_jiffies;
for_each_prime_number_from(prime, 2, 8192) {
@ -106,7 +105,7 @@ static int live_nop_switch(void *arg)
rq = igt_request_alloc(ctx[n % nctx], engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_unlock;
goto out_file;
}
/*
@ -142,7 +141,7 @@ static int live_nop_switch(void *arg)
err = igt_live_test_end(&t);
if (err)
goto out_unlock;
goto out_file;
pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
engine->name,
@ -150,8 +149,212 @@ static int live_nop_switch(void *arg)
prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
}
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
out_file:
mock_file_free(i915, file);
return err;
}
struct parallel_switch {
struct task_struct *tsk;
struct intel_context *ce[2];
};
static int __live_parallel_switch1(void *data)
{
struct parallel_switch *arg = data;
IGT_TIMEOUT(end_time);
unsigned long count;
count = 0;
do {
struct i915_request *rq = NULL;
int err, n;
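/*
 * Only the most recent request needs to be tracked: assuming default
 * in-order (FIFO) submission on this single engine, waiting on the
 * final request below implies the earlier switches completed too.
 */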
for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
i915_request_put(rq);
rq = i915_request_create(arg->ce[n]);
if (IS_ERR(rq))
return PTR_ERR(rq);
i915_request_get(rq);
i915_request_add(rq);
}
err = 0;
if (i915_request_wait(rq, 0, HZ / 5) < 0)
err = -ETIME;
i915_request_put(rq);
if (err)
return err;
count++;
} while (!__igt_timeout(end_time, NULL));
pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
return 0;
}
static int __live_parallel_switchN(void *data)
{
struct parallel_switch *arg = data;
IGT_TIMEOUT(end_time);
unsigned long count;
int n;
count = 0;
do {
for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
struct i915_request *rq;
rq = i915_request_create(arg->ce[n]);
if (IS_ERR(rq))
return PTR_ERR(rq);
i915_request_add(rq);
}
count++;
} while (!__igt_timeout(end_time, NULL));
pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
return 0;
}
static int live_parallel_switch(void *arg)
{
struct drm_i915_private *i915 = arg;
static int (* const func[])(void *arg) = {
__live_parallel_switch1,
__live_parallel_switchN,
NULL,
};
struct parallel_switch *data = NULL;
struct i915_gem_engines *engines;
struct i915_gem_engines_iter it;
int (* const *fn)(void *arg);
struct i915_gem_context *ctx;
struct intel_context *ce;
struct drm_file *file;
int n, m, count;
int err = 0;
/*
* Check we can process switches on all engines simultaneously.
*/
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return 0;
file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_file;
}
engines = i915_gem_context_lock_engines(ctx);
count = engines->num_engines;
data = kcalloc(count, sizeof(*data), GFP_KERNEL);
if (!data) {
i915_gem_context_unlock_engines(ctx);
err = -ENOMEM;
goto out;
}
m = 0; /* Use the first context as our template for the engines */
for_each_gem_engine(ce, engines, it) {
err = intel_context_pin(ce);
if (err) {
i915_gem_context_unlock_engines(ctx);
goto out;
}
data[m++].ce[0] = intel_context_get(ce);
}
i915_gem_context_unlock_engines(ctx);
/* Clone the same set of engines into the other contexts */
for (n = 1; n < ARRAY_SIZE(data->ce); n++) {
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out;
}
for (m = 0; m < count; m++) {
if (!data[m].ce[0])
continue;
ce = intel_context_create(ctx, data[m].ce[0]->engine);
if (IS_ERR(ce))
goto out;
err = intel_context_pin(ce);
if (err) {
intel_context_put(ce);
goto out;
}
data[m].ce[n] = ce;
}
}
for (fn = func; !err && *fn; fn++) {
struct igt_live_test t;
int n;
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
break;
for (n = 0; n < count; n++) {
if (!data[n].ce[0])
continue;
data[n].tsk = kthread_run(*fn, &data[n],
"igt/parallel:%s",
data[n].ce[0]->engine->name);
if (IS_ERR(data[n].tsk)) {
err = PTR_ERR(data[n].tsk);
break;
}
get_task_struct(data[n].tsk);
}
for (n = 0; n < count; n++) {
int status;
if (IS_ERR_OR_NULL(data[n].tsk))
continue;
status = kthread_stop(data[n].tsk);
if (status && !err)
err = status;
put_task_struct(data[n].tsk);
data[n].tsk = NULL;
}
if (igt_live_test_end(&t))
err = -EIO;
}
out:
for (n = 0; n < count; n++) {
for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
if (!data[n].ce[m])
continue;
intel_context_unpin(data[n].ce[m]);
intel_context_put(data[n].ce[m]);
}
}
kfree(data);
out_file:
mock_file_free(i915, file);
return err;
}
@ -166,28 +369,20 @@ static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}
static int gpu_fill(struct drm_i915_gem_object *obj,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
static int gpu_fill(struct intel_context *ce,
struct drm_i915_gem_object *obj,
unsigned int dw)
{
struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
struct i915_vma *vma;
int err;
GEM_BUG_ON(obj->base.size > vm->total);
GEM_BUG_ON(!intel_engine_can_store_dword(engine));
GEM_BUG_ON(obj->base.size > ce->vm->total);
GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
vma = i915_vma_instance(obj, vm, NULL);
vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err)
return err;
err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
if (err)
return err;
@ -200,9 +395,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
* whilst checking that each context provides a unique view
* into the object.
*/
err = igt_gpu_fill_dw(vma,
ctx,
engine,
err = igt_gpu_fill_dw(ce, vma,
(dw * real_page_count(obj)) << PAGE_SHIFT |
(dw * sizeof(u32)),
real_page_count(obj),
@ -305,22 +498,21 @@ static int file_add_object(struct drm_file *file,
}
static struct drm_i915_gem_object *
create_test_object(struct i915_gem_context *ctx,
create_test_object(struct i915_address_space *vm,
struct drm_file *file,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
struct i915_address_space *vm = ctx->vm ?: &ctx->i915->ggtt.vm;
u64 size;
int err;
/* Keep in GEM's good graces */
i915_retire_requests(ctx->i915);
intel_gt_retire_requests(vm->gt);
size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size);
if (IS_ERR(obj))
return obj;
@ -348,6 +540,45 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
return npages / DW_PER_PAGE;
}
static void throttle_release(struct i915_request **q, int count)
{
int i;
for (i = 0; i < count; i++) {
if (IS_ERR_OR_NULL(q[i]))
continue;
i915_request_put(fetch_and_zero(&q[i]));
}
}
static int throttle(struct intel_context *ce,
struct i915_request **q, int count)
{
int i;
if (!IS_ERR_OR_NULL(q[0])) {
if (i915_request_wait(q[0],
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT) < 0)
return -EINTR;
i915_request_put(q[0]);
}
for (i = 0; i < count - 1; i++)
q[i] = q[i + 1];
q[i] = intel_context_create_request(ce);
if (IS_ERR(q[i]))
return PTR_ERR(q[i]);
i915_request_get(q[i]);
i915_request_add(q[i]);
return 0;
}
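throttle() keeps a sliding window of in-flight requests: wait for the oldest slot, shift the queue down, then append a fresh request on the given context. With struct_mutex no longer serialising submission, this stops the selftests from building unbounded request queues. A hedged usage sketch follows; submit_more_work() is hypothetical:

	/* Sketch: bound the selftest to ARRAY_SIZE(tq) requests in flight */
	struct i915_request *tq[5] = {};
	int err = 0;

	while (submit_more_work(ce)) { /* hypothetical work generator */
		err = throttle(ce, tq, ARRAY_SIZE(tq));
		if (err)
			break;
	}
	throttle_release(tq, ARRAY_SIZE(tq)); /* drop the queued references */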
static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
@ -367,6 +598,7 @@ static int igt_ctx_exec(void *arg)
for_each_engine(engine, i915, id) {
struct drm_i915_gem_object *obj = NULL;
unsigned long ncontexts, ndwords, dw;
struct i915_request *tq[5] = {};
struct igt_live_test t;
struct drm_file *file;
IGT_TIMEOUT(end_time);
@ -382,39 +614,53 @@ static int igt_ctx_exec(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
goto out_unlock;
goto out_file;
ncontexts = 0;
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
struct intel_context *ce;
ctx = live_context(i915, file);
ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_unlock;
goto out_file;
}
ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
GEM_BUG_ON(IS_ERR(ce));
if (!obj) {
obj = create_test_object(ctx, file, &objects);
obj = create_test_object(ce->vm, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out_unlock;
intel_context_put(ce);
kernel_context_close(ctx);
goto out_file;
}
}
err = gpu_fill(obj, ctx, engine, dw);
err = gpu_fill(ce, obj, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name, ctx->hw_id,
yesno(!!ctx->vm), err);
goto out_unlock;
engine->name,
yesno(!!rcu_access_pointer(ctx->vm)),
err);
intel_context_put(ce);
kernel_context_close(ctx);
goto out_file;
}
err = throttle(ce, tq, ARRAY_SIZE(tq));
if (err) {
intel_context_put(ce);
kernel_context_close(ctx);
goto out_file;
}
if (++dw == max_dwords(obj)) {
@ -424,6 +670,9 @@ static int igt_ctx_exec(void *arg)
ndwords++;
ncontexts++;
intel_context_put(ce);
kernel_context_close(ctx);
}
pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
@ -441,10 +690,10 @@ static int igt_ctx_exec(void *arg)
dw += rem;
}
out_unlock:
out_file:
throttle_release(tq, ARRAY_SIZE(tq));
if (igt_live_test_end(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
if (err)
@ -459,6 +708,7 @@ out_unlock:
static int igt_shared_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_request *tq[5] = {};
struct i915_gem_context *parent;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@ -478,22 +728,20 @@ static int igt_shared_ctx_exec(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
parent = live_context(i915, file);
if (IS_ERR(parent)) {
err = PTR_ERR(parent);
goto out_unlock;
goto out_file;
}
if (!parent->vm) { /* not full-ppgtt; nothing to share */
err = 0;
goto out_unlock;
goto out_file;
}
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
goto out_file;
for_each_engine(engine, i915, id) {
unsigned long ncontexts, ndwords, dw;
@ -509,6 +757,7 @@ static int igt_shared_ctx_exec(void *arg)
ncontexts = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
struct intel_context *ce;
ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
@ -516,23 +765,38 @@ static int igt_shared_ctx_exec(void *arg)
goto out_test;
}
mutex_lock(&ctx->mutex);
__assign_ppgtt(ctx, parent->vm);
mutex_unlock(&ctx->mutex);
ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
GEM_BUG_ON(IS_ERR(ce));
if (!obj) {
obj = create_test_object(parent, file, &objects);
obj = create_test_object(parent->vm, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
intel_context_put(ce);
kernel_context_close(ctx);
goto out_test;
}
}
err = gpu_fill(obj, ctx, engine, dw);
err = gpu_fill(ce, obj, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name, ctx->hw_id,
yesno(!!ctx->vm), err);
engine->name,
yesno(!!rcu_access_pointer(ctx->vm)),
err);
intel_context_put(ce);
kernel_context_close(ctx);
goto out_test;
}
err = throttle(ce, tq, ARRAY_SIZE(tq));
if (err) {
intel_context_put(ce);
kernel_context_close(ctx);
goto out_test;
}
@ -545,6 +809,7 @@ static int igt_shared_ctx_exec(void *arg)
ndwords++;
ncontexts++;
intel_context_put(ce);
kernel_context_close(ctx);
}
pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
@ -562,16 +827,13 @@ static int igt_shared_ctx_exec(void *arg)
dw += rem;
}
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
mutex_lock(&i915->drm.struct_mutex);
}
out_test:
throttle_release(tq, ARRAY_SIZE(tq));
if (igt_live_test_end(&t))
err = -EIO;
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
out_file:
mock_file_free(i915, file);
return err;
}
@ -604,6 +866,8 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(vma->vm->gt);
vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@ -681,10 +945,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
if (err)
goto skip_request;
i915_vma_unpin(batch);
i915_vma_close(batch);
i915_vma_put(batch);
i915_vma_unpin_and_release(&batch, 0);
i915_vma_unpin(vma);
*rq_out = i915_request_get(rq);
@ -698,8 +959,7 @@ skip_request:
err_request:
i915_request_add(rq);
err_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
i915_vma_unpin_and_release(&batch, 0);
err_vma:
i915_vma_unpin(vma);
@ -860,8 +1120,8 @@ out:
igt_spinner_end(spin);
if ((flags & TEST_IDLE) && ret == 0) {
ret = i915_gem_wait_for_idle(ce->engine->i915,
0, MAX_SCHEDULE_TIMEOUT);
ret = intel_gt_wait_for_idle(ce->engine->gt,
MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@ -887,7 +1147,7 @@ __sseu_test(const char *name,
if (ret)
return ret;
ret = __intel_context_reconfigure_sseu(ce, sseu);
ret = intel_context_reconfigure_sseu(ce, sseu);
if (ret)
goto out_spin;
@ -945,8 +1205,6 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
if (flags & TEST_RESET)
igt_global_reset_lock(&i915->gt);
mutex_lock(&i915->drm.struct_mutex);
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
@ -991,7 +1249,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
goto out_fail;
out_fail:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
ret = -EIO;
intel_context_unpin(ce);
@ -1001,8 +1259,6 @@ out_put:
i915_gem_object_put(obj);
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
if (flags & TEST_RESET)
igt_global_reset_unlock(&i915->gt);
@ -1041,6 +1297,7 @@ static int igt_ctx_readonly(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj = NULL;
struct i915_request *tq[5] = {};
struct i915_address_space *vm;
struct i915_gem_context *ctx;
unsigned long idx, ndwords, dw;
@ -1061,52 +1318,63 @@ static int igt_ctx_readonly(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
goto out_file;
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_unlock;
goto out_file;
}
vm = ctx->vm ?: &i915->ggtt.alias->vm;
rcu_read_lock();
vm = rcu_dereference(ctx->vm) ?: &i915->ggtt.alias->vm;
if (!vm || !vm->has_read_only) {
rcu_read_unlock();
err = 0;
goto out_unlock;
goto out_file;
}
rcu_read_unlock();
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
struct intel_engine_cs *engine;
unsigned int id;
struct i915_gem_engines_iter it;
struct intel_context *ce;
for_each_engine(engine, i915, id) {
if (!intel_engine_can_store_dword(engine))
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
if (!intel_engine_can_store_dword(ce->engine))
continue;
if (!obj) {
obj = create_test_object(ctx, file, &objects);
obj = create_test_object(ce->vm, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out_unlock;
i915_gem_context_unlock_engines(ctx);
goto out_file;
}
if (prandom_u32_state(&prng) & 1)
i915_gem_object_set_readonly(obj);
}
err = gpu_fill(obj, ctx, engine, dw);
err = gpu_fill(ce, obj, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name, ctx->hw_id,
yesno(!!ctx->vm), err);
goto out_unlock;
ce->engine->name,
yesno(!!rcu_access_pointer(ctx->vm)),
err);
i915_gem_context_unlock_engines(ctx);
goto out_file;
}
err = throttle(ce, tq, ARRAY_SIZE(tq));
if (err) {
i915_gem_context_unlock_engines(ctx);
goto out_file;
}
if (++dw == max_dwords(obj)) {
@ -1115,6 +1383,7 @@ static int igt_ctx_readonly(void *arg)
}
ndwords++;
}
i915_gem_context_unlock_engines(ctx);
}
pr_info("Submitted %lu dwords (across %u engines)\n",
ndwords, RUNTIME_INFO(i915)->num_engines);
@ -1137,19 +1406,19 @@ static int igt_ctx_readonly(void *arg)
dw += rem;
}
out_unlock:
out_file:
throttle_release(tq, ARRAY_SIZE(tq));
if (igt_live_test_end(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
return err;
}
static int check_scratch(struct i915_gem_context *ctx, u64 offset)
static int check_scratch(struct i915_address_space *vm, u64 offset)
{
struct drm_mm_node *node =
__drm_mm_interval_first(&ctx->vm->mm,
__drm_mm_interval_first(&vm->mm,
offset, offset + sizeof(u32) - 1);
if (!node || node->start > offset)
return 0;
@ -1167,6 +1436,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
{
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
struct i915_request *rq;
struct i915_vma *vma;
u32 *cmd;
@ -1197,17 +1467,20 @@ static int write_to_scratch(struct i915_gem_context *ctx,
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
vma = i915_vma_instance(obj, ctx->vm, NULL);
intel_gt_chipset_flush(engine->gt);
vm = i915_gem_context_get_vm_rcu(ctx);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
goto err_vm;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
if (err)
goto err;
goto err_vm;
err = check_scratch(ctx, offset);
err = check_scratch(vm, offset);
if (err)
goto err_unpin;
@ -1229,12 +1502,11 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
goto skip_request;
i915_vma_unpin(vma);
i915_vma_close(vma);
i915_vma_put(vma);
i915_vma_unpin_and_release(&vma, 0);
i915_request_add(rq);
i915_vm_put(vm);
return 0;
skip_request:
@ -1243,6 +1515,8 @@ err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
err_vm:
i915_vm_put(vm);
err:
i915_gem_object_put(obj);
return err;
@ -1254,6 +1528,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
{
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
const u32 result = 0x100;
struct i915_request *rq;
@ -1296,17 +1571,20 @@ static int read_from_scratch(struct i915_gem_context *ctx,
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
vma = i915_vma_instance(obj, ctx->vm, NULL);
intel_gt_chipset_flush(engine->gt);
vm = i915_gem_context_get_vm_rcu(ctx);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
goto err_vm;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
if (err)
goto err;
goto err_vm;
err = check_scratch(ctx, offset);
err = check_scratch(vm, offset);
if (err)
goto err_unpin;
@ -1337,12 +1615,12 @@ static int read_from_scratch(struct i915_gem_context *ctx,
err = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (err)
goto err;
goto err_vm;
cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err;
goto err_vm;
}
*value = cmd[result / sizeof(*cmd)];
@ -1357,6 +1635,8 @@ err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
err_vm:
i915_vm_put(vm);
err:
i915_gem_object_put(obj);
return err;
@ -1387,27 +1667,25 @@ static int igt_vm_isolation(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
goto out_file;
ctx_a = live_context(i915, file);
if (IS_ERR(ctx_a)) {
err = PTR_ERR(ctx_a);
goto out_unlock;
goto out_file;
}
ctx_b = live_context(i915, file);
if (IS_ERR(ctx_b)) {
err = PTR_ERR(ctx_b);
goto out_unlock;
goto out_file;
}
/* We can only test vm isolation, if the vm are distinct */
if (ctx_a->vm == ctx_b->vm)
goto out_unlock;
goto out_file;
vm_total = ctx_a->vm->total;
GEM_BUG_ON(ctx_b->vm->total != vm_total);
@ -1436,7 +1714,7 @@ static int igt_vm_isolation(void *arg)
err = read_from_scratch(ctx_b, engine,
offset, &value);
if (err)
goto out_unlock;
goto out_file;
if (value) {
pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
@ -1445,7 +1723,7 @@ static int igt_vm_isolation(void *arg)
lower_32_bits(offset),
this);
err = -EINVAL;
goto out_unlock;
goto out_file;
}
this++;
@ -1455,30 +1733,13 @@ static int igt_vm_isolation(void *arg)
pr_info("Checked %lu scratch offsets across %d engines\n",
count, RUNTIME_INFO(i915)->num_engines);
out_unlock:
out_file:
if (igt_live_test_end(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
return err;
}
static __maybe_unused const char *
__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
{
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
if (engines == ALL_ENGINES)
return "all";
for_each_engine_masked(engine, i915, engines, tmp)
return engine->name;
return "none";
}
static bool skip_unused_engines(struct intel_context *ce, void *data)
{
return !ce->state;
@ -1506,13 +1767,9 @@ static int mock_context_barrier(void *arg)
* a request; useful for retiring old state after loading new.
*/
mutex_lock(&i915->drm.struct_mutex);
ctx = mock_context(i915, "mock");
if (!ctx) {
err = -ENOMEM;
goto unlock;
}
if (!ctx)
return -ENOMEM;
counter = 0;
err = context_barrier_task(ctx, 0,
@ -1585,8 +1842,6 @@ static int mock_context_barrier(void *arg)
out:
mock_context_close(ctx);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
return err;
#undef pr_fmt
#define pr_fmt(x) x
@ -1614,6 +1869,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_nop_switch),
SUBTEST(live_parallel_switch),
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
SUBTEST(igt_ctx_sseu),


@ -10,6 +10,7 @@
#include "gt/intel_gt_pm.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
struct tile {
@ -76,7 +77,97 @@ static u64 tiled_offset(const struct tile *tile, u64 v)
static int check_partial_mapping(struct drm_i915_gem_object *obj,
const struct tile *tile,
unsigned long end_time)
struct rnd_state *prng)
{
const unsigned long npages = obj->base.size / PAGE_SIZE;
struct i915_ggtt_view view;
struct i915_vma *vma;
unsigned long page;
u32 __iomem *io;
struct page *p;
unsigned int n;
u64 offset;
u32 *cpu;
int err;
err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
if (err) {
pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
tile->tiling, tile->stride, err);
return err;
}
GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err) {
pr_err("Failed to flush to GTT write domain; err=%d\n", err);
return err;
}
page = i915_prandom_u32_max_state(npages, prng);
view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma)) {
pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
page, (int)PTR_ERR(vma));
return PTR_ERR(vma);
}
n = page - view.partial.offset;
GEM_BUG_ON(n >= view.partial.size);
io = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(io)) {
pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
page, (int)PTR_ERR(io));
err = PTR_ERR(io);
goto out;
}
iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
i915_vma_unpin_iomap(vma);
offset = tiled_offset(tile, page << PAGE_SHIFT);
if (offset >= obj->base.size)
goto out;
intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
cpu = kmap(p) + offset_in_page(offset);
drm_clflush_virt_range(cpu, sizeof(*cpu));
if (*cpu != (u32)page) {
pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
page, n,
view.partial.offset,
view.partial.size,
vma->size >> PAGE_SHIFT,
tile->tiling ? tile_row_pages(obj) : 0,
vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
offset >> PAGE_SHIFT,
(unsigned int)offset_in_page(offset),
offset,
(u32)page, *cpu);
err = -EINVAL;
}
*cpu = 0;
drm_clflush_virt_range(cpu, sizeof(*cpu));
kunmap(p);
out:
i915_vma_destroy(vma);
return err;
}
static int check_partial_mappings(struct drm_i915_gem_object *obj,
const struct tile *tile,
unsigned long end_time)
{
const unsigned int nreal = obj->scratch / PAGE_SIZE;
const unsigned long npages = obj->base.size / PAGE_SIZE;
@ -84,11 +175,6 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
unsigned long page;
int err;
if (igt_timeout(end_time,
"%s: timed out before tiling=%d stride=%d\n",
__func__, tile->tiling, tile->stride))
return -EINTR;
err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
if (err) {
pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
@ -170,11 +256,42 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
return err;
i915_vma_destroy(vma);
if (igt_timeout(end_time,
"%s: timed out after tiling=%d stride=%d\n",
__func__, tile->tiling, tile->stride))
return -EINTR;
}
return 0;
}
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
if (INTEL_GEN(i915) <= 2) {
tile->height = 16;
tile->width = 128;
tile->size = 11;
} else if (tile->tiling == I915_TILING_Y &&
HAS_128_BYTE_Y_TILING(i915)) {
tile->height = 32;
tile->width = 128;
tile->size = 12;
} else {
tile->height = 8;
tile->width = 512;
tile->size = 12;
}
if (INTEL_GEN(i915) < 4)
return 8192 / tile->width;
else if (INTEL_GEN(i915) < 7)
return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
else
return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}
static int igt_partial_tiling(void *arg)
{
const unsigned int nreal = 1 << 12; /* largest tile row x2 */
@ -205,7 +322,6 @@ static int igt_partial_tiling(void *arg)
goto out;
}
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (1) {
@ -219,7 +335,7 @@ static int igt_partial_tiling(void *arg)
tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
tile.tiling = I915_TILING_NONE;
err = check_partial_mapping(obj, &tile, end);
err = check_partial_mappings(obj, &tile, end);
if (err && err != -EINTR)
goto out_unlock;
}
@ -253,31 +369,11 @@ static int igt_partial_tiling(void *arg)
tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
continue;
if (INTEL_GEN(i915) <= 2) {
tile.height = 16;
tile.width = 128;
tile.size = 11;
} else if (tile.tiling == I915_TILING_Y &&
HAS_128_BYTE_Y_TILING(i915)) {
tile.height = 32;
tile.width = 128;
tile.size = 12;
} else {
tile.height = 8;
tile.width = 512;
tile.size = 12;
}
if (INTEL_GEN(i915) < 4)
max_pitch = 8192 / tile.width;
else if (INTEL_GEN(i915) < 7)
max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width;
else
max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width;
max_pitch = setup_tile_size(&tile, i915);
for (pitch = max_pitch; pitch; pitch >>= 1) {
tile.stride = tile.width * pitch;
err = check_partial_mapping(obj, &tile, end);
err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@ -285,7 +381,7 @@ static int igt_partial_tiling(void *arg)
if (pitch > 2 && INTEL_GEN(i915) >= 4) {
tile.stride = tile.width * (pitch - 1);
err = check_partial_mapping(obj, &tile, end);
err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@ -294,7 +390,7 @@ static int igt_partial_tiling(void *arg)
if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
tile.stride = tile.width * (pitch + 1);
err = check_partial_mapping(obj, &tile, end);
err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@ -305,7 +401,7 @@ static int igt_partial_tiling(void *arg)
if (INTEL_GEN(i915) >= 4) {
for_each_prime_number(pitch, max_pitch) {
tile.stride = tile.width * pitch;
err = check_partial_mapping(obj, &tile, end);
err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@ -318,7 +414,97 @@ next_tiling: ;
out_unlock:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
out:
i915_gem_object_put(obj);
return err;
}
static int igt_smoke_tiling(void *arg)
{
const unsigned int nreal = 1 << 12; /* largest tile row x2 */
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
intel_wakeref_t wakeref;
I915_RND_STATE(prng);
unsigned long count;
IGT_TIMEOUT(end);
int err;
/*
* igt_partial_tiling() does an exhaustive check of partial tiling
* chunking, but will undoubtedly run out of time. Here, we do a
* randomised search and hope that, over many 1s runs with different
* seeds, we achieve a thorough check.
*
* Remember to look at the st_seed if we see a flip-flop in BAT!
*/
if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
return 0;
obj = huge_gem_object(i915,
nreal << PAGE_SHIFT,
(1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
if (IS_ERR(obj))
return PTR_ERR(obj);
err = i915_gem_object_pin_pages(obj);
if (err) {
pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
nreal, obj->base.size / PAGE_SIZE, err);
goto out;
}
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
do {
struct tile tile;
tile.tiling =
i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
switch (tile.tiling) {
case I915_TILING_NONE:
tile.height = 1;
tile.width = 1;
tile.size = 0;
tile.stride = 0;
tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
break;
case I915_TILING_X:
tile.swizzle = i915->mm.bit_6_swizzle_x;
break;
case I915_TILING_Y:
tile.swizzle = i915->mm.bit_6_swizzle_y;
break;
}
if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
continue;
if (tile.tiling != I915_TILING_NONE) {
unsigned int max_pitch = setup_tile_size(&tile, i915);
tile.stride =
i915_prandom_u32_max_state(max_pitch, &prng);
tile.stride = (1 + tile.stride) * tile.width;
if (INTEL_GEN(i915) < 4)
tile.stride = rounddown_pow_of_two(tile.stride);
}
err = check_partial_mapping(obj, &tile, &prng);
if (err)
break;
count++;
} while (!__igt_timeout(end, NULL));
pr_info("%s: Completed %lu trials\n", __func__, count);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
i915_gem_object_unpin_pages(obj);
out:
i915_gem_object_put(obj);
@ -386,21 +572,14 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
static void disable_retire_worker(struct drm_i915_private *i915)
{
i915_gem_driver_unregister__shrinker(i915);
intel_gt_pm_get(&i915->gt);
cancel_delayed_work_sync(&i915->gem.retire_work);
flush_work(&i915->gem.idle_work);
cancel_delayed_work_sync(&i915->gt.requests.retire_work);
}
static void restore_retire_worker(struct drm_i915_private *i915)
{
igt_flush_test(i915);
intel_gt_pm_put(&i915->gt);
mutex_lock(&i915->drm.struct_mutex);
igt_flush_test(i915, I915_WAIT_LOCKED);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_driver_register__shrinker(i915);
}
@ -490,9 +669,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto out;
}
mutex_lock(&i915->drm.struct_mutex);
err = make_obj_busy(obj);
mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("[loop %d] Failed to busy the object\n", loop);
goto err_obj;
@ -515,6 +692,7 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_partial_tiling),
SUBTEST(igt_smoke_tiling),
SUBTEST(igt_mmap_offset_exhaustion),
};


@ -65,9 +65,7 @@ static int igt_fill_blt(void *arg)
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
obj->cache_dirty = true;
mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_fill_blt(obj, ce, val);
mutex_unlock(&i915->drm.struct_mutex);
if (err)
goto err_unpin;
@ -166,9 +164,7 @@ static int igt_copy_blt(void *arg)
if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
dst->cache_dirty = true;
mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_copy_blt(src, dst, ce);
mutex_unlock(&i915->drm.struct_mutex);
if (err)
goto err_unpin;


@ -25,9 +25,7 @@ static int mock_phys_object(void *arg)
goto out;
}
mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
goto out_obj;


@ -9,6 +9,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "gt/intel_gt.h"
#include "i915_vma.h"
#include "i915_drv.h"
@ -84,6 +85,8 @@ igt_emit_store_dw(struct i915_vma *vma,
*cmd = MI_BATCH_BUFFER_END;
i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(vma->vm->gt);
vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@ -101,40 +104,35 @@ err:
return ERR_PTR(err);
}
int igt_gpu_fill_dw(struct i915_vma *vma,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
u64 offset,
unsigned long count,
u32 val)
int igt_gpu_fill_dw(struct intel_context *ce,
struct i915_vma *vma, u64 offset,
unsigned long count, u32 val)
{
struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
struct i915_request *rq;
struct i915_vma *batch;
unsigned int flags;
int err;
GEM_BUG_ON(vma->size > vm->total);
GEM_BUG_ON(!intel_engine_can_store_dword(engine));
GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
GEM_BUG_ON(!i915_vma_is_pinned(vma));
batch = igt_emit_store_dw(vma, offset, count, val);
if (IS_ERR(batch))
return PTR_ERR(batch);
rq = igt_request_alloc(ctx, engine);
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
}
flags = 0;
if (INTEL_GEN(vm->i915) <= 5)
if (INTEL_GEN(ce->vm->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
err = engine->emit_bb_start(rq,
batch->node.start, batch->node.size,
flags);
err = rq->engine->emit_bb_start(rq,
batch->node.start, batch->node.size,
flags);
if (err)
goto err_request;
@ -156,9 +154,7 @@ int igt_gpu_fill_dw(struct i915_vma *vma,
i915_request_add(rq);
i915_vma_unpin(batch);
i915_vma_close(batch);
i915_vma_put(batch);
i915_vma_unpin_and_release(&batch, 0);
return 0;
@ -167,7 +163,6 @@ skip_request:
err_request:
i915_request_add(rq);
err_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
i915_vma_unpin_and_release(&batch, 0);
return err;
}


@ -11,9 +11,11 @@
struct i915_request;
struct i915_gem_context;
struct intel_engine_cs;
struct i915_vma;
struct intel_context;
struct intel_engine_cs;
struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
@ -23,11 +25,8 @@ igt_emit_store_dw(struct i915_vma *vma,
unsigned long count,
u32 val);
int igt_gpu_fill_dw(struct i915_vma *vma,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
u64 offset,
unsigned long count,
u32 val);
int igt_gpu_fill_dw(struct intel_context *ce,
struct i915_vma *vma, u64 offset,
unsigned long count, u32 val);
#endif /* __IGT_GEM_UTILS_H__ */


@ -13,7 +13,6 @@ mock_context(struct drm_i915_private *i915,
{
struct i915_gem_context *ctx;
struct i915_gem_engines *e;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@ -30,13 +29,8 @@ mock_context(struct drm_i915_private *i915,
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->hw_id_link);
mutex_init(&ctx->mutex);
ret = i915_gem_context_pin_hw_id(ctx);
if (ret < 0)
goto err_engines;
if (name) {
struct i915_ppgtt *ppgtt;
@ -48,14 +42,15 @@ mock_context(struct drm_i915_private *i915,
if (!ppgtt)
goto err_put;
mutex_lock(&ctx->mutex);
__set_ppgtt(ctx, &ppgtt->vm);
mutex_unlock(&ctx->mutex);
i915_vm_put(&ppgtt->vm);
}
return ctx;
err_engines:
free_engines(rcu_access_pointer(ctx->engines));
err_free:
kfree(ctx);
return NULL;
@ -73,7 +68,7 @@ void mock_context_close(struct i915_gem_context *ctx)
void mock_init_contexts(struct drm_i915_private *i915)
{
init_contexts(i915);
init_contexts(&i915->gem.contexts);
}
struct i915_gem_context *
@ -82,8 +77,6 @@ live_context(struct drm_i915_private *i915, struct drm_file *file)
struct i915_gem_context *ctx;
int err;
lockdep_assert_held(&i915->drm.struct_mutex);
ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx))
return ctx;


@ -120,7 +120,6 @@ __dma_fence_signal__notify(struct dma_fence *fence,
struct dma_fence_cb *cur, *tmp;
lockdep_assert_held(fence->lock);
lockdep_assert_irqs_disabled();
list_for_each_entry_safe(cur, tmp, list, node) {
INIT_LIST_HEAD(&cur->node);
@ -134,9 +133,10 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
const ktime_t timestamp = ktime_get();
struct intel_context *ce, *cn;
struct list_head *pos, *next;
unsigned long flags;
LIST_HEAD(signal);
spin_lock(&b->irq_lock);
spin_lock_irqsave(&b->irq_lock, flags);
if (b->irq_armed && list_empty(&b->signalers))
__intel_breadcrumbs_disarm_irq(b);
@ -182,30 +182,23 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
}
}
spin_unlock(&b->irq_lock);
spin_unlock_irqrestore(&b->irq_lock, flags);
list_for_each_safe(pos, next, &signal) {
struct i915_request *rq =
list_entry(pos, typeof(*rq), signal_link);
struct list_head cb_list;
spin_lock(&rq->lock);
spin_lock_irqsave(&rq->lock, flags);
list_replace(&rq->fence.cb_list, &cb_list);
__dma_fence_signal__timestamp(&rq->fence, timestamp);
__dma_fence_signal__notify(&rq->fence, &cb_list);
spin_unlock(&rq->lock);
spin_unlock_irqrestore(&rq->lock, flags);
i915_request_put(rq);
}
}
void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
{
local_irq_disable();
intel_engine_breadcrumbs_irq(engine);
local_irq_enable();
}
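The local_irq_disable()/enable() wrapper above is gone; the signal path now uses spin_lock_irqsave() so it is safe to call from either process or hard-irq context. The general idiom, as a sketch:

	/* Sketch: irqsave saves and disables local interrupt state... */
	unsigned long flags;

	spin_lock_irqsave(&b->irq_lock, flags);
	/* ... critical section, safe from any calling context ... */
	spin_unlock_irqrestore(&b->irq_lock, flags); /* ...and restores it */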
static void signal_irq_work(struct irq_work *work)
{
struct intel_engine_cs *engine =
@ -275,7 +268,6 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
lockdep_assert_held(&rq->lock);
lockdep_assert_irqs_disabled();
if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
@ -325,7 +317,6 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
lockdep_assert_held(&rq->lock);
lockdep_assert_irqs_disabled();
/*
* We must wait for b->irq_lock so that we know the interrupt handler


@ -134,10 +134,11 @@ static int __context_pin_state(struct i915_vma *vma)
static void __context_unpin_state(struct i915_vma *vma)
{
__i915_vma_unpin(vma);
i915_vma_make_shrinkable(vma);
__i915_vma_unpin(vma);
}
__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
@ -150,6 +151,7 @@ static void __intel_context_retire(struct i915_active *active)
intel_timeline_unpin(ce->timeline);
intel_ring_unpin(ce->ring);
intel_context_put(ce);
}
@ -219,12 +221,20 @@ intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct i915_address_space *vm;
GEM_BUG_ON(!engine->cops);
kref_init(&ce->ref);
ce->gem_context = ctx;
ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
rcu_read_lock();
vm = rcu_dereference(ctx->vm);
if (vm)
ce->vm = i915_vm_get(vm);
else
ce->vm = i915_vm_get(&engine->gt->ggtt->vm);
rcu_read_unlock();
if (ctx->timeline)
ce->timeline = intel_timeline_get(ctx->timeline);
@ -238,7 +248,7 @@ intel_context_init(struct intel_context *ce,
mutex_init(&ce->pin_mutex);
i915_active_init(ctx->i915, &ce->active,
i915_active_init(&ce->active,
__intel_context_active, __intel_context_retire);
}
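Several callers in this series use i915_gem_context_get_vm_rcu() for the same grab-under-RCU step performed inline above. A sketch of the assumed helper shape, returning a referenced vm with a GGTT fallback; the caller pairs it with i915_vm_put():

	/* Sketch: assumed helper, not taken verbatim from this diff */
	static inline struct i915_address_space *
	i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
	{
		struct i915_address_space *vm;

		rcu_read_lock();
		vm = rcu_dereference(ctx->vm);
		if (!vm)
			vm = &ctx->i915->ggtt.vm; /* no private ppgtt */
		vm = i915_vm_get(vm); /* caller must i915_vm_put() */
		rcu_read_unlock();

		return vm;
	}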
@ -298,14 +308,14 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
/* Only suitable for use in remotely modifying this context */
GEM_BUG_ON(rq->hw_context == ce);
if (rq->timeline != tl) { /* beware timeline sharing */
if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
err = mutex_lock_interruptible_nested(&tl->mutex,
SINGLE_DEPTH_NESTING);
if (err)
return err;
/* Queue this switch after current activity by this context. */
err = i915_active_request_set(&tl->last_request, rq);
err = i915_active_fence_set(&tl->last_request, rq);
mutex_unlock(&tl->mutex);
if (err)
return err;
@ -319,7 +329,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
* words transfer the pinned ce object to tracked active request.
*/
GEM_BUG_ON(i915_active_is_idle(&ce->active));
return i915_active_ref(&ce->active, rq->timeline, rq);
return i915_active_add_request(&ce->active, rq);
}
struct i915_request *intel_context_create_request(struct intel_context *ce)


@ -58,6 +58,7 @@ struct intel_context {
u32 *lrc_reg_state;
u64 lrc_desc;
u32 tag; /* cookie passed to HW to track this context on submission */
unsigned int active_count; /* protected by timeline->mutex */


@ -335,7 +335,6 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine);
void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
static inline void


@ -680,6 +680,8 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
engine->status_page.vma))
goto out_frame;
mutex_lock(&frame->timeline.mutex);
frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs);
frame->ring.effective_size = frame->ring.size;
@ -688,18 +690,22 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
frame->rq.i915 = engine->i915;
frame->rq.engine = engine;
frame->rq.ring = &frame->ring;
frame->rq.timeline = &frame->timeline;
rcu_assign_pointer(frame->rq.timeline, &frame->timeline);
dw = intel_timeline_pin(&frame->timeline);
if (dw < 0)
goto out_timeline;
spin_lock_irq(&engine->active.lock);
dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
spin_unlock_irq(&engine->active.lock);
GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
intel_timeline_unpin(&frame->timeline);
out_timeline:
mutex_unlock(&frame->timeline.mutex);
intel_timeline_fini(&frame->timeline);
out_frame:
kfree(frame);
@ -814,8 +820,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
i915_gem_object_put(engine->default_state);
intel_context_unpin(engine->kernel_context);
intel_context_put(engine->kernel_context);
if (engine->kernel_context) {
intel_context_unpin(engine->kernel_context);
intel_context_put(engine->kernel_context);
}
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
intel_wa_list_free(&engine->ctx_wa_list);
@ -948,6 +956,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
struct drm_i915_private *i915 = engine->i915;
const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base;
int slice;
@ -965,7 +974,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
for_each_instdone_slice_subslice(i915, slice, subslice) {
for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
instdone->sampler[slice][subslice] =
read_subslice_reg(engine, slice, subslice,
GEN7_SAMPLER_INSTDONE);
@ -1118,6 +1127,8 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
case 3:
/* maybe only uses physical not virtual addresses */
return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
case 4:
return !IS_I965G(engine->i915); /* who knows! */
case 6:
return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
default:
@ -1193,6 +1204,27 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
}
}
static struct intel_timeline *get_timeline(struct i915_request *rq)
{
struct intel_timeline *tl;
/*
* Even though we are holding the engine->active.lock here, there
* is no control over the submission queue per se and we are
* inspecting the active state at a random point in time, with an
* unknown queue. Play safe and make sure the timeline remains valid.
* (Only being used for pretty printing, one extra kref shouldn't
* cause a camel stampede!)
*/
rcu_read_lock();
tl = rcu_dereference(rq->timeline);
if (!kref_get_unless_zero(&tl->kref))
tl = NULL;
rcu_read_unlock();
return tl;
}
static void intel_engine_print_registers(struct intel_engine_cs *engine,
struct drm_printer *m)
{
@ -1287,27 +1319,37 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
int len;
len = snprintf(hdr, sizeof(hdr),
"\t\tActive[%d: ",
"\t\tActive[%d]: ",
(int)(port - execlists->active));
if (!i915_request_signaled(rq))
if (!i915_request_signaled(rq)) {
struct intel_timeline *tl = get_timeline(rq);
len += snprintf(hdr + len, sizeof(hdr) - len,
"ring:{start:%08x, hwsp:%08x, seqno:%08x}, ",
i915_ggtt_offset(rq->ring->vma),
rq->timeline->hwsp_offset,
tl ? tl->hwsp_offset : 0,
hwsp_seqno(rq));
if (tl)
intel_timeline_put(tl);
}
snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
}
for (port = execlists->pending; (rq = *port); port++) {
struct intel_timeline *tl = get_timeline(rq);
char hdr[80];
snprintf(hdr, sizeof(hdr),
"\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
(int)(port - execlists->pending),
i915_ggtt_offset(rq->ring->vma),
rq->timeline->hwsp_offset,
tl ? tl->hwsp_offset : 0,
hwsp_seqno(rq));
print_request(m, rq, hdr);
if (tl)
intel_timeline_put(tl);
}
spin_unlock_irqrestore(&engine->active.lock, flags);
} else if (INTEL_GEN(dev_priv) > 6) {
@ -1385,6 +1427,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
spin_lock_irqsave(&engine->active.lock, flags);
rq = intel_engine_find_active_request(engine);
if (rq) {
struct intel_timeline *tl = get_timeline(rq);
print_request(m, rq, "\t\tactive ");
drm_printf(m, "\t\tring->start: 0x%08x\n",
@ -1397,10 +1441,19 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rq->ring->emit);
drm_printf(m, "\t\tring->space: 0x%08x\n",
rq->ring->space);
drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
rq->timeline->hwsp_offset);
if (tl) {
drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
tl->hwsp_offset);
intel_timeline_put(tl);
}
print_request_ring(m, rq);
if (rq->hw_context->lrc_reg_state) {
drm_printf(m, "Logical Ring Context:\n");
hexdump(m, rq->hw_context->lrc_reg_state, PAGE_SIZE);
}
}
spin_unlock_irqrestore(&engine->active.lock, flags);

View File

@ -11,6 +11,7 @@
#include "intel_engine_pool.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
static int __engine_unpark(struct intel_wakeref *wf)
{
@ -103,7 +104,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
/* Context switch failed, hope for the best! Maybe reset? */
goto out_unlock;
intel_timeline_enter(rq->timeline);
intel_timeline_enter(i915_request_timeline(rq));
/* Check again on the next retirement. */
engine->wakeref_serial = engine->serial + 1;
@ -123,6 +124,19 @@ out_unlock:
return result;
}
static void call_idle_barriers(struct intel_engine_cs *engine)
{
struct llist_node *node, *next;
llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
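/*
 * The barrier tasks chain dma_fence_cb.node (a list_head) through an
 * llist; the cast below relies on the single llist_node overlaying
 * list_head.next.
 */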
struct dma_fence_cb *cb =
container_of((struct list_head *)node,
typeof(*cb), node);
cb->func(NULL, cb);
}
}
static int __engine_park(struct intel_wakeref *wf)
{
struct intel_engine_cs *engine =
@ -142,6 +156,8 @@ static int __engine_park(struct intel_wakeref *wf)
GEM_TRACE("%s\n", engine->name);
call_idle_barriers(engine); /* cleanup after wedging */
intel_engine_disarm_breadcrumbs(engine);
intel_engine_pool_park(&engine->pool);

View File

@ -61,6 +61,7 @@ static int pool_active(struct i915_active *ref)
return 0;
}
__i915_active_call
static void pool_retire(struct i915_active *ref)
{
struct intel_engine_pool_node *node =
@ -94,7 +95,7 @@ node_create(struct intel_engine_pool *pool, size_t sz)
return ERR_PTR(-ENOMEM);
node->pool = pool;
i915_active_init(engine->i915, &node->active, pool_active, pool_retire);
i915_active_init(&node->active, pool_active, pool_retire);
obj = i915_gem_object_create_internal(engine->i915, sz);
if (IS_ERR(obj)) {
@ -107,9 +108,19 @@ node_create(struct intel_engine_pool *pool, size_t sz)
return node;
}
struct intel_engine_pool_node *
intel_engine_pool_get(struct intel_engine_pool *pool, size_t size)
static struct intel_engine_pool *lookup_pool(struct intel_engine_cs *engine)
{
if (intel_engine_is_virtual(engine))
engine = intel_virtual_engine_get_sibling(engine, 0);
GEM_BUG_ON(!engine);
return &engine->pool;
}
struct intel_engine_pool_node *
intel_engine_get_pool(struct intel_engine_cs *engine, size_t size)
{
struct intel_engine_pool *pool = lookup_pool(engine);
struct intel_engine_pool_node *node;
struct list_head *list;
unsigned long flags;
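A hedged usage sketch of the new engine-relative interface (engine and rq assumed in scope; a virtual engine is transparently redirected to its first sibling by lookup_pool() above):

struct intel_engine_pool_node *pool;
int err;

pool = intel_engine_get_pool(engine, SZ_64K);
if (IS_ERR(pool))
	return PTR_ERR(pool);

/* pool->obj is a transient buffer; keep it alive until rq retires */
err = intel_engine_pool_mark_active(pool, rq);
intel_engine_pool_put(pool); /* drop the local acquire; rq holds it now */
if (err)
	return err;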

View File

@ -12,13 +12,13 @@
#include "i915_request.h"
struct intel_engine_pool_node *
intel_engine_pool_get(struct intel_engine_pool *pool, size_t size);
intel_engine_get_pool(struct intel_engine_cs *engine, size_t size);
static inline int
intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
struct i915_request *rq)
{
return i915_active_ref(&node->active, rq->timeline, rq);
return i915_active_add_request(&node->active, rq);
}
static inline void

View File

@ -303,10 +303,12 @@ struct intel_engine_cs {
u8 uabi_class;
u8 uabi_instance;
u32 uabi_capabilities;
u32 context_size;
u32 mmio_base;
u32 uabi_capabilities;
unsigned int context_tag;
#define NUM_CONTEXT_TAG roundup_pow_of_two(2 * EXECLIST_MAX_PORTS)
struct rb_node uabi_node;
@ -481,6 +483,7 @@ struct intel_engine_cs {
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
#define I915_ENGINE_IS_VIRTUAL BIT(5)
#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
unsigned int flags;
/*
@ -576,20 +579,24 @@ intel_engine_is_virtual(const struct intel_engine_cs *engine)
return engine->flags & I915_ENGINE_IS_VIRTUAL;
}
#define instdone_slice_mask(dev_priv__) \
(IS_GEN(dev_priv__, 7) ? \
1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
static inline bool
intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
{
return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO;
}
#define instdone_subslice_mask(dev_priv__) \
(IS_GEN(dev_priv__, 7) ? \
1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
#define instdone_has_slice(dev_priv___, sseu___, slice___) \
((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))
#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
for ((slice__) = 0, (subslice__) = 0; \
(slice__) < I915_MAX_SLICES; \
(subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
(slice__) += ((subslice__) == 0)) \
for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
(BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \
(IS_GEN(dev_priv__, 7) ? (1 & BIT(subslice__)) : \
intel_sseu_has_subslice(sseu__, 0, subslice__))
#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \
for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \
(subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \
(slice_) += ((subslice_) == 0)) \
for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \
(instdone_has_subslice(dev_priv_, sseu_, slice_, \
subslice_)))
#endif /* __INTEL_ENGINE_TYPES_H__ */

View File

@ -112,6 +112,7 @@
#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
#define MI_SEMAPHORE_WAIT_TOKEN MI_INSTR(0x1c, 3) /* GEN12+ */
#define MI_SEMAPHORE_POLL (1 << 15)
#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12)
#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12)
@ -119,6 +120,8 @@
#define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12)
#define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12)
#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12)
#define MI_SEMAPHORE_TOKEN_MASK REG_GENMASK(9, 5)
#define MI_SEMAPHORE_TOKEN_SHIFT 5
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
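MI_SEMAPHORE_WAIT grows a Gen12 token-carrying variant above; for reference, the pre-Gen12 form is emitted roughly as follows (a sketch: cs, seqno and hwsp_offset are assumed in scope, and MI_SEMAPHORE_GLOBAL_GTT is defined elsewhere in this header):

*cs++ = MI_SEMAPHORE_WAIT |
	MI_SEMAPHORE_GLOBAL_GTT |
	MI_SEMAPHORE_POLL |
	MI_SEMAPHORE_SAD_GTE_SDD;
*cs++ = seqno;	     /* semaphore data dword to compare against */
*cs++ = hwsp_offset; /* GGTT address of the dword to poll */
*cs++ = 0;	     /* upper 32 bits of the address */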
@ -132,6 +135,8 @@
* address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
/* Gen11+. addr = base + (ctx_restore ? offset & GENMASK(12,2) : offset) */
#define MI_LRI_CS_MMIO (1<<19)
#define MI_LRI_FORCE_POSTED (1<<12)
#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1)
#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2)
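A hedged sketch of a single LRI write using the definitions above (cs, reg and value assumed in scope); MI_LRI_CS_MMIO would additionally make the offset engine-relative on Gen11+:

*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_FORCE_POSTED;
*cs++ = i915_mmio_reg_offset(reg);
*cs++ = value;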
@ -147,6 +152,7 @@
#define MI_FLUSH_DW_USE_PPGTT (0<<2)
#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1)
#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2)
#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 1)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
@ -235,6 +241,29 @@
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
#define MI_MATH(x) MI_INSTR(0x1a, (x) - 1)
#define MI_MATH_INSTR(opcode, op1, op2) ((opcode) << 20 | (op1) << 10 | (op2))
/* Opcodes for MI_MATH_INSTR */
#define MI_MATH_NOOP MI_MATH_INSTR(0x000, 0x0, 0x0)
#define MI_MATH_LOAD(op1, op2) MI_MATH_INSTR(0x080, op1, op2)
#define MI_MATH_LOADINV(op1, op2) MI_MATH_INSTR(0x480, op1, op2)
#define MI_MATH_LOAD0(op1) MI_MATH_INSTR(0x081, op1)
#define MI_MATH_LOAD1(op1) MI_MATH_INSTR(0x481, op1)
#define MI_MATH_ADD MI_MATH_INSTR(0x100, 0x0, 0x0)
#define MI_MATH_SUB MI_MATH_INSTR(0x101, 0x0, 0x0)
#define MI_MATH_AND MI_MATH_INSTR(0x102, 0x0, 0x0)
#define MI_MATH_OR MI_MATH_INSTR(0x103, 0x0, 0x0)
#define MI_MATH_XOR MI_MATH_INSTR(0x104, 0x0, 0x0)
#define MI_MATH_STORE(op1, op2) MI_MATH_INSTR(0x180, op1, op2)
#define MI_MATH_STOREINV(op1, op2) MI_MATH_INSTR(0x580, op1, op2)
/* Registers used as operands in MI_MATH_INSTR */
#define MI_MATH_REG(x) (x)
#define MI_MATH_REG_SRCA 0x20
#define MI_MATH_REG_SRCB 0x21
#define MI_MATH_REG_ACCU 0x31
#define MI_MATH_REG_ZF 0x32
#define MI_MATH_REG_CF 0x33
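A hedged sketch of an ALU sequence built from the new opcodes: load two GPRs, add them, and store the accumulator. MI_MATH's argument is the number of instruction dwords that follow:

*cs++ = MI_MATH(4);
*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(0));
*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(1));
*cs++ = MI_MATH_ADD;
*cs++ = MI_MATH_STORE(MI_MATH_REG(2), MI_MATH_REG_ACCU);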
/*
* Commands used only by the command parser
*/
@ -251,7 +280,6 @@
#define MI_CLFLUSH MI_INSTR(0x27, 0)
#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0)
#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)

View File

@ -6,7 +6,11 @@
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mocs.h"
#include "intel_rc6.h"
#include "intel_uncore.h"
#include "intel_pm.h"
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
@ -20,13 +24,106 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
intel_gt_init_hangcheck(gt);
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
intel_gt_pm_init_early(gt);
intel_uc_init_early(&gt->uc);
}
void intel_gt_init_hw(struct drm_i915_private *i915)
void intel_gt_init_hw_early(struct drm_i915_private *i915)
{
i915->gt.ggtt = &i915->ggtt;
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_gt_pm_disable(&i915->gt);
}
static void init_unused_ring(struct intel_gt *gt, u32 base)
{
struct intel_uncore *uncore = gt->uncore;
intel_uncore_write(uncore, RING_CTL(base), 0);
intel_uncore_write(uncore, RING_HEAD(base), 0);
intel_uncore_write(uncore, RING_TAIL(base), 0);
intel_uncore_write(uncore, RING_START(base), 0);
}
static void init_unused_rings(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
if (IS_I830(i915)) {
init_unused_ring(gt, PRB1_BASE);
init_unused_ring(gt, SRB0_BASE);
init_unused_ring(gt, SRB1_BASE);
init_unused_ring(gt, SRB2_BASE);
init_unused_ring(gt, SRB3_BASE);
} else if (IS_GEN(i915, 2)) {
init_unused_ring(gt, SRB0_BASE);
init_unused_ring(gt, SRB1_BASE);
} else if (IS_GEN(i915, 3)) {
init_unused_ring(gt, PRB1_BASE);
init_unused_ring(gt, PRB2_BASE);
}
}
int intel_gt_init_hw(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
int ret;
BUG_ON(!i915->kernel_context);
ret = intel_gt_terminally_wedged(gt);
if (ret)
return ret;
gt->last_init_time = ktime_get();
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
if (IS_HASWELL(i915))
intel_uncore_write(uncore,
MI_PREDICATE_RESULT_2,
IS_HSW_GT3(i915) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
/* Apply the GT workarounds... */
intel_gt_apply_workarounds(gt);
/* ...and determine whether they are sticking. */
intel_gt_verify_workarounds(gt, "init");
intel_gt_init_swizzling(gt);
/*
* At least 830 can leave some of the unused rings
* "active" (ie. head != tail) after resume which
* will prevent c3 entry. Make sure all unused rings
* are totally idle.
*/
init_unused_rings(gt);
ret = i915_ppgtt_init_hw(gt);
if (ret) {
DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
goto out;
}
/* We can't enable contexts until all firmware is loaded */
ret = intel_uc_init_hw(&gt->uc);
if (ret) {
i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
goto out;
}
intel_mocs_init(gt);
out:
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
return ret;
}
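The "double layer security blanket" keeps every forcewake domain awake across the whole sequence, so the individual register accesses skip per-access wake handshakes; the bracket pattern is simply:

intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* ... any number of intel_uncore_write()/intel_uncore_read() calls ... */
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);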
static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
@ -207,11 +304,12 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
struct intel_uncore *uncore = gt->uncore;
unsigned long flags;
spin_lock_irq(&uncore->lock);
spin_lock_irqsave(&uncore->lock, flags);
intel_uncore_posting_read_fw(uncore,
RING_HEAD(RENDER_RING_BASE));
spin_unlock_irq(&uncore->lock);
spin_unlock_irqrestore(&uncore->lock, flags);
}
}
@ -222,7 +320,13 @@ void intel_gt_chipset_flush(struct intel_gt *gt)
intel_gtt_chipset_flush();
}
int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
void intel_gt_driver_register(struct intel_gt *gt)
{
if (IS_GEN(gt->i915, 5))
intel_gpu_ips_init(gt->i915);
}
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
struct drm_i915_private *i915 = gt->i915;
struct drm_i915_gem_object *obj;
@ -230,7 +334,7 @@ int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
int ret;
obj = i915_gem_object_create_stolen(i915, size);
if (!obj)
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate scratch page\n");
@ -256,11 +360,44 @@ err_unref:
return ret;
}
void intel_gt_fini_scratch(struct intel_gt *gt)
static void intel_gt_fini_scratch(struct intel_gt *gt)
{
i915_vma_unpin_and_release(&gt->scratch, 0);
}
int intel_gt_init(struct intel_gt *gt)
{
int err;
err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
if (err)
return err;
intel_gt_pm_init(gt);
return 0;
}
void intel_gt_driver_remove(struct intel_gt *gt)
{
GEM_BUG_ON(gt->awake);
intel_gt_pm_disable(gt);
}
void intel_gt_driver_unregister(struct intel_gt *gt)
{
intel_gpu_ips_teardown();
}
void intel_gt_driver_release(struct intel_gt *gt)
{
/* Paranoia: make sure we have disabled everything before we exit. */
intel_gt_pm_disable(gt);
intel_gt_pm_fini(gt);
intel_gt_fini_scratch(gt);
}
void intel_gt_driver_late_release(struct intel_gt *gt)
{
intel_uc_driver_late_release(&gt->uc);

View File

@ -28,7 +28,14 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
}
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
void intel_gt_init_hw(struct drm_i915_private *i915);
void intel_gt_init_hw_early(struct drm_i915_private *i915);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
int intel_gt_init(struct intel_gt *gt);
void intel_gt_driver_register(struct intel_gt *gt);
void intel_gt_driver_unregister(struct intel_gt *gt);
void intel_gt_driver_remove(struct intel_gt *gt);
void intel_gt_driver_release(struct intel_gt *gt);
void intel_gt_driver_late_release(struct intel_gt *gt);
@ -41,9 +48,6 @@ void intel_gt_chipset_flush(struct intel_gt *gt);
void intel_gt_init_hangcheck(struct intel_gt *gt);
int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size);
void intel_gt_fini_scratch(struct intel_gt *gt);
static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
enum intel_gt_scratch_field field)
{

View File

@ -5,16 +5,20 @@
*/
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_params.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_wakeref.h"
static void pm_notify(struct drm_i915_private *i915, int state)
static void pm_notify(struct intel_gt *gt, int state)
{
blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915);
blocking_notifier_call_chain(&gt->pm_notifications, state, gt->i915);
}
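The head here is a standard blocking notifier chain, so a listener would subscribe along these lines (a hedged sketch; the callback and its body are illustrative):

static int my_gt_pm_cb(struct notifier_block *nb,
		       unsigned long state, void *data)
{
	struct drm_i915_private *i915 = data;

	if (state == INTEL_GT_UNPARK)
		dev_dbg(i915->drm.dev, "GT unparked\n");
	return NOTIFY_OK;
}

static struct notifier_block my_gt_pm_nb = {
	.notifier_call = my_gt_pm_cb,
};

/* somewhere during setup: */
blocking_notifier_chain_register(&gt->pm_notifications, &my_gt_pm_nb);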
static int __gt_unpark(struct intel_wakeref *wf)
@ -24,6 +28,8 @@ static int __gt_unpark(struct intel_wakeref *wf)
GEM_TRACE("\n");
i915_globals_unpark();
/*
* It seems that the DMC likes to transition between the DC states a lot
* when there are no connected displays (no active power domains) during
@ -47,21 +53,23 @@ static int __gt_unpark(struct intel_wakeref *wf)
i915_pmu_gt_unparked(i915);
intel_gt_queue_hangcheck(gt);
intel_gt_unpark_requests(gt);
pm_notify(i915, INTEL_GT_UNPARK);
pm_notify(gt, INTEL_GT_UNPARK);
return 0;
}
static int __gt_park(struct intel_wakeref *wf)
{
struct drm_i915_private *i915 =
container_of(wf, typeof(*i915), gt.wakeref);
intel_wakeref_t wakeref = fetch_and_zero(&i915->gt.awake);
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
struct drm_i915_private *i915 = gt->i915;
GEM_TRACE("\n");
pm_notify(i915, INTEL_GT_PARK);
pm_notify(gt, INTEL_GT_PARK);
intel_gt_park_requests(gt);
i915_pmu_gt_parked(i915);
if (INTEL_GEN(i915) >= 6)
@ -73,6 +81,8 @@ static int __gt_park(struct intel_wakeref *wf)
GEM_BUG_ON(!wakeref);
intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
i915_globals_park();
return 0;
}
@ -89,6 +99,16 @@ void intel_gt_pm_init_early(struct intel_gt *gt)
BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
}
void intel_gt_pm_init(struct intel_gt *gt)
{
/*
* Enabling power-management should be "self-healing". If we cannot
* enable a feature, simply leave it disabled with a notice to the
* user.
*/
intel_rc6_init(&gt->rc6);
}
static bool reset_engines(struct intel_gt *gt)
{
if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
@ -123,6 +143,17 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
__intel_engine_reset(engine, false);
}
void intel_gt_pm_disable(struct intel_gt *gt)
{
if (!is_mock_gt(gt))
intel_sanitize_gt_powersave(gt->i915);
}
void intel_gt_pm_fini(struct intel_gt *gt)
{
intel_rc6_fini(&gt->rc6);
}
int intel_gt_resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
@ -136,14 +167,21 @@ int intel_gt_resume(struct intel_gt *gt)
* allowing us to fixup the user contexts on their first pin.
*/
intel_gt_pm_get(gt);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
intel_rc6_sanitize(&gt->rc6);
for_each_engine(engine, gt->i915, id) {
struct intel_context *ce;
intel_engine_pm_get(engine);
ce = engine->kernel_context;
if (ce)
if (ce) {
GEM_BUG_ON(!intel_context_is_pinned(ce));
mutex_acquire(&ce->pin_mutex.dep_map, 0, 0, _THIS_IP_);
ce->ops->reset(ce);
mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
}
engine->serial++; /* kernel context lost */
err = engine->resume(engine);
@ -156,11 +194,38 @@ int intel_gt_resume(struct intel_gt *gt)
break;
}
}
intel_rc6_enable(&gt->rc6);
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
intel_gt_pm_put(gt);
return err;
}
static void wait_for_idle(struct intel_gt *gt)
{
if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
/*
* Forcibly cancel outstanding work and leave
* the gpu quiet.
*/
intel_gt_set_wedged(gt);
}
intel_gt_pm_wait_for_idle(gt);
}
void intel_gt_suspend(struct intel_gt *gt)
{
intel_wakeref_t wakeref;
/* We expect to be idle already; but also want to be independent */
wait_for_idle(gt);
with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
intel_rc6_disable(&gt->rc6);
}
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
intel_uc_runtime_suspend(&gt->uc);
@ -172,3 +237,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
return intel_uc_runtime_resume(&gt->uc);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_gt_pm.c"
#endif

View File

@ -43,10 +43,21 @@ static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
}
void intel_gt_pm_init_early(struct intel_gt *gt);
void intel_gt_pm_init(struct intel_gt *gt);
void intel_gt_pm_disable(struct intel_gt *gt);
void intel_gt_pm_fini(struct intel_gt *gt);
void intel_gt_sanitize(struct intel_gt *gt, bool force);
int intel_gt_resume(struct intel_gt *gt);
void intel_gt_suspend(struct intel_gt *gt);
void intel_gt_runtime_suspend(struct intel_gt *gt);
int intel_gt_runtime_resume(struct intel_gt *gt);
static inline bool is_mock_gt(const struct intel_gt *gt)
{
return I915_SELFTEST_ONLY(gt->awake == -ENODEV);
}
#endif /* INTEL_GT_PM_H */

View File

@ -0,0 +1,123 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#include "i915_request.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_timeline.h"
static void retire_requests(struct intel_timeline *tl)
{
struct i915_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &tl->requests, link)
if (!i915_request_retire(rq))
break;
}
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
{
struct intel_gt_timelines *timelines = &gt->timelines;
struct intel_timeline *tl, *tn;
unsigned long active_count = 0;
unsigned long flags;
bool interruptible;
LIST_HEAD(free);
interruptible = true;
if (unlikely(timeout < 0))
timeout = -timeout, interruptible = false;
spin_lock_irqsave(&timelines->lock, flags);
list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
if (!mutex_trylock(&tl->mutex))
continue;
intel_timeline_get(tl);
GEM_BUG_ON(!tl->active_count);
tl->active_count++; /* pin the list element */
spin_unlock_irqrestore(&timelines->lock, flags);
if (timeout > 0) {
struct dma_fence *fence;
fence = i915_active_fence_get(&tl->last_request);
if (fence) {
timeout = dma_fence_wait_timeout(fence,
true,
timeout);
dma_fence_put(fence);
}
}
retire_requests(tl);
spin_lock_irqsave(&timelines->lock, flags);
/* Resume iteration after dropping lock */
list_safe_reset_next(tl, tn, link);
if (--tl->active_count)
active_count += !!rcu_access_pointer(tl->last_request.fence);
else
list_del(&tl->link);
mutex_unlock(&tl->mutex);
/* Defer the final release to after the spinlock */
if (refcount_dec_and_test(&tl->kref.refcount)) {
GEM_BUG_ON(tl->active_count);
list_add(&tl->link, &free);
}
}
spin_unlock_irqrestore(&timelines->lock, flags);
list_for_each_entry_safe(tl, tn, &free, link)
__intel_timeline_free(&tl->kref);
return active_count ? timeout : 0;
}
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
/* If the device is asleep, we have no requests outstanding */
if (!intel_gt_pm_is_awake(gt))
return 0;
while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
cond_resched();
if (signal_pending(current))
return -EINTR;
}
return timeout;
}
static void retire_work_handler(struct work_struct *work)
{
struct intel_gt *gt =
container_of(work, typeof(*gt), requests.retire_work.work);
intel_gt_retire_requests(gt);
schedule_delayed_work(&gt->requests.retire_work,
round_jiffies_up_relative(HZ));
}
void intel_gt_init_requests(struct intel_gt *gt)
{
INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
}
void intel_gt_park_requests(struct intel_gt *gt)
{
cancel_delayed_work(&gt->requests.retire_work);
}
void intel_gt_unpark_requests(struct intel_gt *gt)
{
schedule_delayed_work(&gt->requests.retire_work,
round_jiffies_up_relative(HZ));
}

View File

@ -0,0 +1,24 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#ifndef INTEL_GT_REQUESTS_H
#define INTEL_GT_REQUESTS_H
struct intel_gt;
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout);
static inline void intel_gt_retire_requests(struct intel_gt *gt)
{
intel_gt_retire_requests_timeout(gt, 0);
}
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
void intel_gt_init_requests(struct intel_gt *gt);
void intel_gt_park_requests(struct intel_gt *gt);
void intel_gt_unpark_requests(struct intel_gt *gt);
#endif /* INTEL_GT_REQUESTS_H */

View File

@ -18,6 +18,7 @@
#include "i915_vma.h"
#include "intel_engine_types.h"
#include "intel_reset_types.h"
#include "intel_rc6_types.h"
#include "intel_wakeref.h"
struct drm_i915_private;
@ -49,7 +50,19 @@ struct intel_gt {
struct list_head hwsp_free_list;
} timelines;
struct intel_gt_requests {
/**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
* fire periodically while the ring is running. When it
* fires, go retire requests.
*/
struct delayed_work retire_work;
} requests;
struct intel_wakeref wakeref;
atomic_t user_wakeref;
struct list_head closed_vma;
spinlock_t closed_lock; /* guards the list of closed_vma */
@ -66,6 +79,8 @@ struct intel_gt {
*/
intel_wakeref_t awake;
struct intel_rc6 rc6;
struct blocking_notifier_head pm_notifications;
ktime_t last_init_time;
@ -88,9 +103,6 @@ enum intel_gt_scratch_field {
/* 8 bytes */
INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,
/* 8 bytes */
INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA = 128,
/* 8 bytes */
INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,

View File

@ -53,6 +53,7 @@ static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
static bool subunits_stuck(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct intel_instdone instdone;
struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
bool stuck;
@ -71,7 +72,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
stuck &= instdone_unchanged(instdone.slice_common,
&accu_instdone->slice_common);
for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) {
stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
&accu_instdone->sampler[slice][subslice]);
stuck &= instdone_unchanged(instdone.row[slice][subslice],
@ -283,7 +284,7 @@ static void hangcheck_elapsed(struct work_struct *work)
for_each_engine(engine, gt->i915, id) {
struct hangcheck hc;
intel_engine_signal_breadcrumbs(engine);
intel_engine_breadcrumbs_irq(engine);
hangcheck_load_sample(engine, &hc);
hangcheck_accumulate_sample(engine, &hc);

File diff suppressed because it is too large

View File

@ -66,6 +66,12 @@ struct intel_engine_cs;
#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
/* in Gen12 ID 0x7FF is reserved to indicate idle */
#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
enum {
INTEL_CONTEXT_SCHEDULE_IN = 0,
INTEL_CONTEXT_SCHEDULE_OUT,
@ -104,6 +110,10 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine);
*/
#define LRC_HEADER_PAGES LRC_PPHWSP_PN
/* Space within PPHWSP reserved to be used as scratch */
#define LRC_PPHWSP_SCRATCH 0x34
#define LRC_PPHWSP_SCRATCH_ADDR (LRC_PPHWSP_SCRATCH * sizeof(u32))
void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
void intel_lr_context_reset(struct intel_engine_cs *engine,
@ -131,4 +141,8 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
const struct intel_engine_cs *master,
const struct intel_engine_cs *sibling);
struct intel_engine_cs *
intel_virtual_engine_get_sibling(struct intel_engine_cs *engine,
unsigned int sibling);
#endif /* _INTEL_LRC_H_ */

View File

@ -9,55 +9,41 @@
#include <linux/types.h>
/* GEN8+ Reg State Context */
#define CTX_LRI_HEADER_0 0x01
#define CTX_CONTEXT_CONTROL 0x02
#define CTX_RING_HEAD 0x04
#define CTX_RING_TAIL 0x06
#define CTX_RING_BUFFER_START 0x08
#define CTX_RING_BUFFER_CONTROL 0x0a
#define CTX_BB_HEAD_U 0x0c
#define CTX_BB_HEAD_L 0x0e
#define CTX_BB_STATE 0x10
#define CTX_SECOND_BB_HEAD_U 0x12
#define CTX_SECOND_BB_HEAD_L 0x14
#define CTX_SECOND_BB_STATE 0x16
#define CTX_BB_PER_CTX_PTR 0x18
#define CTX_RCS_INDIRECT_CTX 0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
#define CTX_LRI_HEADER_1 0x21
#define CTX_CTX_TIMESTAMP 0x22
#define CTX_PDP3_UDW 0x24
#define CTX_PDP3_LDW 0x26
#define CTX_PDP2_UDW 0x28
#define CTX_PDP2_LDW 0x2a
#define CTX_PDP1_UDW 0x2c
#define CTX_PDP1_LDW 0x2e
#define CTX_PDP0_UDW 0x30
#define CTX_PDP0_LDW 0x32
#define CTX_LRI_HEADER_2 0x41
#define CTX_R_PWR_CLK_STATE 0x42
#define CTX_END 0x44
/* GEN8 to GEN11 Reg State Context */
#define CTX_CONTEXT_CONTROL (0x02 + 1)
#define CTX_RING_HEAD (0x04 + 1)
#define CTX_RING_TAIL (0x06 + 1)
#define CTX_RING_BUFFER_START (0x08 + 1)
#define CTX_RING_BUFFER_CONTROL (0x0a + 1)
#define CTX_BB_STATE (0x10 + 1)
#define CTX_BB_PER_CTX_PTR (0x18 + 1)
#define CTX_PDP3_UDW (0x24 + 1)
#define CTX_PDP3_LDW (0x26 + 1)
#define CTX_PDP2_UDW (0x28 + 1)
#define CTX_PDP2_LDW (0x2a + 1)
#define CTX_PDP1_UDW (0x2c + 1)
#define CTX_PDP1_LDW (0x2e + 1)
#define CTX_PDP0_UDW (0x30 + 1)
#define CTX_PDP0_LDW (0x32 + 1)
#define CTX_R_PWR_CLK_STATE (0x42 + 1)
#define CTX_REG(reg_state, pos, reg, val) do { \
u32 *reg_state__ = (reg_state); \
const u32 pos__ = (pos); \
(reg_state__)[(pos__) + 0] = i915_mmio_reg_offset(reg); \
(reg_state__)[(pos__) + 1] = (val); \
} while (0)
#define GEN9_CTX_RING_MI_MODE 0x54
/* GEN12+ Reg State Context */
#define GEN12_CTX_BB_PER_CTX_PTR (0x12 + 1)
#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
u32 *reg_state__ = (reg_state); \
const u64 addr__ = i915_page_dir_dma_addr((ppgtt), (n)); \
(reg_state__)[CTX_PDP ## n ## _UDW + 1] = upper_32_bits(addr__); \
(reg_state__)[CTX_PDP ## n ## _LDW + 1] = lower_32_bits(addr__); \
(reg_state__)[CTX_PDP ## n ## _UDW] = upper_32_bits(addr__); \
(reg_state__)[CTX_PDP ## n ## _LDW] = lower_32_bits(addr__); \
} while (0)
#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
u32 *reg_state__ = (reg_state); \
const u64 addr__ = px_dma(ppgtt->pd); \
(reg_state__)[CTX_PDP0_UDW + 1] = upper_32_bits(addr__); \
(reg_state__)[CTX_PDP0_LDW + 1] = lower_32_bits(addr__); \
(reg_state__)[CTX_PDP0_UDW] = upper_32_bits(addr__); \
(reg_state__)[CTX_PDP0_LDW] = lower_32_bits(addr__); \
} while (0)
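Since these helpers paste the literal index into the CTX_PDPn names, callers must unroll the four entries rather than loop over them; roughly (a sketch matching how the legacy 32b register state is seeded):

ASSIGN_CTX_PDP(ppgtt, regs, 3);
ASSIGN_CTX_PDP(ppgtt, regs, 2);
ASSIGN_CTX_PDP(ppgtt, regs, 1);
ASSIGN_CTX_PDP(ppgtt, regs, 0);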
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17

Some files were not shown because too many files have changed in this diff