Merge tag 'drm-intel-next-2024-02-07' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

drm/i915 feature pull for v6.9:

Features and functionality:
- Early transport for panel replay and PSR (Jouni)
- New ARL PCI IDs (Matt)
- DP TPS4 PHY test pattern support (Khaled)

Refactoring and cleanups:
- Unify and improve VSC SDP for PSR and non-PSR cases (Jouni)
- Refactor memory regions and improve debug logging (Ville)
- Rework global state serialization (Ville)
- Remove unused CDCLK divider fields (Gustavo)
- Unify HDCP connector logging format (Jani)
- Use display instead of graphics version in display code (Jani)
- Move VBT and opregion debugfs next to the implementation (Jani)
- Abstract opregion interface, use opaque type (Jani)

Fixes:
- Fix MTL stolen memory access (Ville)
- Fix initial display plane readout for MTL (Ville)
- Fix HPD handling during driver init/shutdown (Imre)
- Cursor vblank evasion fixes (Ville)
- Various VSC SDP fixes (Jouni)
- Allow PSR mode changes without full modeset (Jouni)
- Fix CDCLK sanitization on module load for Xe2_LPD (Gustavo)
- Fix the max DSC bpc supported by the source (Ankit)
- Add missing LNL ALPM AUX wake configuration (Jouni)
- Cx0 PHY state readout and verification fixes (Mika)
- Fix PSR (panel replay) debugfs for MST connectors (Imre)
- Fail HDCP repeater authentication if Type1 device not present (Suraj)
- Ratelimit debug logging in vm_fault_ttm (Nirmoy)
- Use a fake PCH for MTL because south display is not on the PCH (Haridhar)
- Disable DSB for Xe driver for now (José)
- Fix some LNL display register changes (Lucas)
- Fix build on ChromeOS (Paz Zcharya)
- Preserve current shared DPLL for fastsets on Type-C ports (Ville)
- Fix state checker warnings for MG/TC/TBT PLLs (Ville)
- Fix HDCP repeater ctl register value on errors (Jani)
- Allow FBC with CCS modifiers on SKL+ (Ville)
- Fix HDCP GGTT pinning (Ville)

DRM core changes:
- Add ratelimited drm dbg print (Nirmoy); a usage sketch follows this list
- DPCD PSR early transport macro (Jouni)
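
As a usage sketch of the new ratelimited debug print (the call site below is hypothetical; it assumes drm_dbg_ratelimited() mirrors drm_dbg()'s drm-device-plus-format arguments):

#include <drm/drm_print.h>

/* Hypothetical caller: throttle a per-fault debug message so a
 * misbehaving client cannot flood the log.
 */
static void report_fault(struct drm_device *drm, unsigned long addr)
{
	drm_dbg_ratelimited(drm, "fault at 0x%lx\n", addr);
}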

Merges:
- Backmerge drm-next to bring Xe driver to drm-intel-next (Jani)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87cyt8cxsh.fsf@intel.com
Committed by Dave Airlie on 2024-02-16 06:52:03 +10:00 as commit b13cfb445c.
86 changed files with 2354 additions and 1148 deletions.

@@ -1060,3 +1060,33 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->fb = intel_fb;
}
bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
const struct intel_initial_plane_config *plane_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 base;
if (!plane_state->uapi.visible)
return false;
base = intel_plane_ggtt_offset(plane_state);
/*
* We may have moved the surface to a different
* part of ggtt, make the plane aware of that.
*/
if (plane_config->base == base)
return false;
if (DISPLAY_VER(dev_priv) >= 4)
intel_de_write(dev_priv, DSPSURF(i9xx_plane), base);
else
intel_de_write(dev_priv, DSPADDR(i9xx_plane), base);
return true;
}

@@ -26,6 +26,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe);
void i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config);
bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
const struct intel_initial_plane_config *plane_config);
#else
static inline unsigned int i965_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
@@ -46,6 +48,11 @@ static inline void i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
}
static inline bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
const struct intel_initial_plane_config *plane_config)
{
return false;
}
#endif
#endif

@@ -217,6 +217,9 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
int width, height;
unsigned int rel_data_rate;
if (plane->id == PLANE_CURSOR)
return 0;
if (!plane_state->uapi.visible)
return 0;
@@ -244,9 +247,6 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
rel_data_rate = width * height * fb->format->cpp[color_plane];
if (plane->id == PLANE_CURSOR)
return rel_data_rate;
return intel_adjusted_rate(&plane_state->uapi.src,
&plane_state->uapi.dst,
rel_data_rate);
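
With the cursor early-return hoisted above the visibility check, cursors contribute no relative data rate at all; for other planes the rate is the framebuffer footprint, scaled up when the plane is downscaled. A minimal standalone sketch of that arithmetic (assuming intel_adjusted_rate() scales by the src/dst area ratio; names and rounding here are illustrative):

#include <stdio.h>

/* Illustrative model of the relative data rate math: bytes per pixel
 * times the visible size, scaled when downscaling.
 */
static unsigned int rel_data_rate(unsigned int src_w, unsigned int src_h,
				  unsigned int dst_w, unsigned int dst_h,
				  unsigned int cpp)
{
	unsigned long long rate = (unsigned long long)src_w * src_h * cpp;

	/* downscaling raises the effective rate */
	if (dst_w > src_w)
		dst_w = src_w;
	if (dst_h > src_h)
		dst_h = src_h;
	return rate * src_w * src_h / ((unsigned long long)dst_w * dst_h);
}

int main(void)
{
	/* 4K ARGB8888 plane downscaled to 1080p: 4x the 1:1 rate */
	printf("%u\n", rel_data_rate(3840, 2160, 1920, 1080, 4));
	return 0;
}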

@@ -1465,7 +1465,7 @@ static bool cnp_backlight_controller_is_valid(struct drm_i915_private *i915, int
if (controller == 1 &&
INTEL_PCH_TYPE(i915) >= PCH_ICP &&
INTEL_PCH_TYPE(i915) < PCH_MTP)
INTEL_PCH_TYPE(i915) <= PCH_ADP)
return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;

@@ -2204,8 +2204,7 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
if (IS_DGFX(i915))
return vbt_pin;
if (INTEL_PCH_TYPE(i915) >= PCH_LNL || HAS_PCH_MTP(i915) ||
IS_ALDERLAKE_P(i915)) {
if (INTEL_PCH_TYPE(i915) >= PCH_MTL || IS_ALDERLAKE_P(i915)) {
ddc_pin_map = adlp_ddc_pin_map;
n_entries = ARRAY_SIZE(adlp_ddc_pin_map);
} else if (IS_ALDERLAKE_S(i915)) {
@@ -3074,7 +3073,7 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915)
*/
void intel_bios_init(struct drm_i915_private *i915)
{
const struct vbt_header *vbt = i915->display.opregion.vbt;
const struct vbt_header *vbt;
struct vbt_header *oprom_vbt = NULL;
const struct bdb_header *bdb;
@@ -3089,6 +3088,8 @@ void intel_bios_init(struct drm_i915_private *i915)
init_vbt_defaults(i915);
vbt = intel_opregion_get_vbt(i915, NULL);
/*
* If the OpRegion does not have VBT, look in SPI flash through MMIO or
* PCI mapping
@@ -3306,7 +3307,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
* additional data. Trust that if the VBT was written into
* the OpRegion then they have validated the LVDS's existence.
*/
if (i915->display.opregion.vbt)
if (intel_opregion_get_vbt(i915, NULL))
return true;
}
@@ -3657,3 +3658,30 @@ void intel_bios_for_each_encoder(struct drm_i915_private *i915,
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
func(i915, devdata);
}
static int intel_bios_vbt_show(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = m->private;
const void *vbt;
size_t vbt_size;
/*
* FIXME: VBT might originate from other places than opregion, and then
* this would be incorrect.
*/
vbt = intel_opregion_get_vbt(i915, &vbt_size);
if (vbt)
seq_write(m, vbt, vbt_size);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(intel_bios_vbt);
void intel_bios_debugfs_register(struct drm_i915_private *i915)
{
struct drm_minor *minor = i915->drm.primary;
debugfs_create_file("i915_vbt", 0444, minor->debugfs_root,
i915, &intel_bios_vbt_fops);
}
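
The opregion accessor used throughout this hunk belongs to the new opaque interface; its shape can be read off the call sites (a sketch of the assumed declaration, not copied from the header):

/* Assumed declaration, derived from the call sites above: returns the
 * OpRegion VBT if present, optionally reporting its size via @size.
 */
const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size);

Once registered, the dump should be readable from userspace, typically as /sys/kernel/debug/dri/<minor>/i915_vbt.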

@@ -246,13 +246,10 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int dsc_max_bpc);
bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port);
bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port);
const struct intel_bios_encoder_data *
intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port);
@@ -283,4 +280,6 @@ void intel_bios_for_each_encoder(struct drm_i915_private *i915,
void (*func)(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata));
void intel_bios_debugfs_register(struct drm_i915_private *i915);
#endif /* _INTEL_BIOS_H_ */

@@ -1227,183 +1227,182 @@ struct intel_cdclk_vals {
u32 cdclk;
u16 refclk;
u16 waveform;
u8 divider; /* CD2X divider * 2 */
u8 ratio;
};
static const struct intel_cdclk_vals bxt_cdclk_table[] = {
{ .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 },
{ .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 },
{ .refclk = 19200, .cdclk = 384000, .divider = 3, .ratio = 60 },
{ .refclk = 19200, .cdclk = 576000, .divider = 2, .ratio = 60 },
{ .refclk = 19200, .cdclk = 624000, .divider = 2, .ratio = 65 },
{ .refclk = 19200, .cdclk = 144000, .ratio = 60 },
{ .refclk = 19200, .cdclk = 288000, .ratio = 60 },
{ .refclk = 19200, .cdclk = 384000, .ratio = 60 },
{ .refclk = 19200, .cdclk = 576000, .ratio = 60 },
{ .refclk = 19200, .cdclk = 624000, .ratio = 65 },
{}
};
static const struct intel_cdclk_vals glk_cdclk_table[] = {
{ .refclk = 19200, .cdclk = 79200, .divider = 8, .ratio = 33 },
{ .refclk = 19200, .cdclk = 158400, .divider = 4, .ratio = 33 },
{ .refclk = 19200, .cdclk = 316800, .divider = 2, .ratio = 33 },
{ .refclk = 19200, .cdclk = 79200, .ratio = 33 },
{ .refclk = 19200, .cdclk = 158400, .ratio = 33 },
{ .refclk = 19200, .cdclk = 316800, .ratio = 33 },
{}
};
static const struct intel_cdclk_vals icl_cdclk_table[] = {
{ .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 },
{ .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
{ .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
{ .refclk = 19200, .cdclk = 326400, .divider = 4, .ratio = 68 },
{ .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
{ .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
{ .refclk = 19200, .cdclk = 172800, .ratio = 18 },
{ .refclk = 19200, .cdclk = 192000, .ratio = 20 },
{ .refclk = 19200, .cdclk = 307200, .ratio = 32 },
{ .refclk = 19200, .cdclk = 326400, .ratio = 68 },
{ .refclk = 19200, .cdclk = 556800, .ratio = 58 },
{ .refclk = 19200, .cdclk = 652800, .ratio = 68 },
{ .refclk = 24000, .cdclk = 180000, .divider = 2, .ratio = 15 },
{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
{ .refclk = 24000, .cdclk = 324000, .divider = 4, .ratio = 54 },
{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
{ .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
{ .refclk = 24000, .cdclk = 180000, .ratio = 15 },
{ .refclk = 24000, .cdclk = 192000, .ratio = 16 },
{ .refclk = 24000, .cdclk = 312000, .ratio = 26 },
{ .refclk = 24000, .cdclk = 324000, .ratio = 54 },
{ .refclk = 24000, .cdclk = 552000, .ratio = 46 },
{ .refclk = 24000, .cdclk = 648000, .ratio = 54 },
{ .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 },
{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
{ .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 },
{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
{ .refclk = 38400, .cdclk = 172800, .ratio = 9 },
{ .refclk = 38400, .cdclk = 192000, .ratio = 10 },
{ .refclk = 38400, .cdclk = 307200, .ratio = 16 },
{ .refclk = 38400, .cdclk = 326400, .ratio = 34 },
{ .refclk = 38400, .cdclk = 556800, .ratio = 29 },
{ .refclk = 38400, .cdclk = 652800, .ratio = 34 },
{}
};
static const struct intel_cdclk_vals rkl_cdclk_table[] = {
{ .refclk = 19200, .cdclk = 172800, .divider = 4, .ratio = 36 },
{ .refclk = 19200, .cdclk = 192000, .divider = 4, .ratio = 40 },
{ .refclk = 19200, .cdclk = 307200, .divider = 4, .ratio = 64 },
{ .refclk = 19200, .cdclk = 326400, .divider = 8, .ratio = 136 },
{ .refclk = 19200, .cdclk = 556800, .divider = 4, .ratio = 116 },
{ .refclk = 19200, .cdclk = 652800, .divider = 4, .ratio = 136 },
{ .refclk = 19200, .cdclk = 172800, .ratio = 36 },
{ .refclk = 19200, .cdclk = 192000, .ratio = 40 },
{ .refclk = 19200, .cdclk = 307200, .ratio = 64 },
{ .refclk = 19200, .cdclk = 326400, .ratio = 136 },
{ .refclk = 19200, .cdclk = 556800, .ratio = 116 },
{ .refclk = 19200, .cdclk = 652800, .ratio = 136 },
{ .refclk = 24000, .cdclk = 180000, .divider = 4, .ratio = 30 },
{ .refclk = 24000, .cdclk = 192000, .divider = 4, .ratio = 32 },
{ .refclk = 24000, .cdclk = 312000, .divider = 4, .ratio = 52 },
{ .refclk = 24000, .cdclk = 324000, .divider = 8, .ratio = 108 },
{ .refclk = 24000, .cdclk = 552000, .divider = 4, .ratio = 92 },
{ .refclk = 24000, .cdclk = 648000, .divider = 4, .ratio = 108 },
{ .refclk = 24000, .cdclk = 180000, .ratio = 30 },
{ .refclk = 24000, .cdclk = 192000, .ratio = 32 },
{ .refclk = 24000, .cdclk = 312000, .ratio = 52 },
{ .refclk = 24000, .cdclk = 324000, .ratio = 108 },
{ .refclk = 24000, .cdclk = 552000, .ratio = 92 },
{ .refclk = 24000, .cdclk = 648000, .ratio = 108 },
{ .refclk = 38400, .cdclk = 172800, .divider = 4, .ratio = 18 },
{ .refclk = 38400, .cdclk = 192000, .divider = 4, .ratio = 20 },
{ .refclk = 38400, .cdclk = 307200, .divider = 4, .ratio = 32 },
{ .refclk = 38400, .cdclk = 326400, .divider = 8, .ratio = 68 },
{ .refclk = 38400, .cdclk = 556800, .divider = 4, .ratio = 58 },
{ .refclk = 38400, .cdclk = 652800, .divider = 4, .ratio = 68 },
{ .refclk = 38400, .cdclk = 172800, .ratio = 18 },
{ .refclk = 38400, .cdclk = 192000, .ratio = 20 },
{ .refclk = 38400, .cdclk = 307200, .ratio = 32 },
{ .refclk = 38400, .cdclk = 326400, .ratio = 68 },
{ .refclk = 38400, .cdclk = 556800, .ratio = 58 },
{ .refclk = 38400, .cdclk = 652800, .ratio = 68 },
{}
};
static const struct intel_cdclk_vals adlp_a_step_cdclk_table[] = {
{ .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
{ .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
{ .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
{ .refclk = 19200, .cdclk = 307200, .ratio = 32 },
{ .refclk = 19200, .cdclk = 556800, .ratio = 58 },
{ .refclk = 19200, .cdclk = 652800, .ratio = 68 },
{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
{ .refclk = 24400, .cdclk = 648000, .divider = 2, .ratio = 54 },
{ .refclk = 24000, .cdclk = 312000, .ratio = 26 },
{ .refclk = 24000, .cdclk = 552000, .ratio = 46 },
{ .refclk = 24400, .cdclk = 648000, .ratio = 54 },
{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
{ .refclk = 38400, .cdclk = 307200, .ratio = 16 },
{ .refclk = 38400, .cdclk = 556800, .ratio = 29 },
{ .refclk = 38400, .cdclk = 652800, .ratio = 34 },
{}
};
static const struct intel_cdclk_vals adlp_cdclk_table[] = {
{ .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 },
{ .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
{ .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
{ .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
{ .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
{ .refclk = 19200, .cdclk = 172800, .ratio = 27 },
{ .refclk = 19200, .cdclk = 192000, .ratio = 20 },
{ .refclk = 19200, .cdclk = 307200, .ratio = 32 },
{ .refclk = 19200, .cdclk = 556800, .ratio = 58 },
{ .refclk = 19200, .cdclk = 652800, .ratio = 68 },
{ .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 },
{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
{ .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
{ .refclk = 24000, .cdclk = 176000, .ratio = 22 },
{ .refclk = 24000, .cdclk = 192000, .ratio = 16 },
{ .refclk = 24000, .cdclk = 312000, .ratio = 26 },
{ .refclk = 24000, .cdclk = 552000, .ratio = 46 },
{ .refclk = 24000, .cdclk = 648000, .ratio = 54 },
{ .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
{ .refclk = 38400, .cdclk = 179200, .ratio = 14 },
{ .refclk = 38400, .cdclk = 192000, .ratio = 10 },
{ .refclk = 38400, .cdclk = 307200, .ratio = 16 },
{ .refclk = 38400, .cdclk = 556800, .ratio = 29 },
{ .refclk = 38400, .cdclk = 652800, .ratio = 34 },
{}
};
static const struct intel_cdclk_vals rplu_cdclk_table[] = {
{ .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 },
{ .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
{ .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
{ .refclk = 19200, .cdclk = 480000, .divider = 2, .ratio = 50 },
{ .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
{ .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
{ .refclk = 19200, .cdclk = 172800, .ratio = 27 },
{ .refclk = 19200, .cdclk = 192000, .ratio = 20 },
{ .refclk = 19200, .cdclk = 307200, .ratio = 32 },
{ .refclk = 19200, .cdclk = 480000, .ratio = 50 },
{ .refclk = 19200, .cdclk = 556800, .ratio = 58 },
{ .refclk = 19200, .cdclk = 652800, .ratio = 68 },
{ .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 },
{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
{ .refclk = 24000, .cdclk = 480000, .divider = 2, .ratio = 40 },
{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
{ .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
{ .refclk = 24000, .cdclk = 176000, .ratio = 22 },
{ .refclk = 24000, .cdclk = 192000, .ratio = 16 },
{ .refclk = 24000, .cdclk = 312000, .ratio = 26 },
{ .refclk = 24000, .cdclk = 480000, .ratio = 40 },
{ .refclk = 24000, .cdclk = 552000, .ratio = 46 },
{ .refclk = 24000, .cdclk = 648000, .ratio = 54 },
{ .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
{ .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25 },
{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
{ .refclk = 38400, .cdclk = 179200, .ratio = 14 },
{ .refclk = 38400, .cdclk = 192000, .ratio = 10 },
{ .refclk = 38400, .cdclk = 307200, .ratio = 16 },
{ .refclk = 38400, .cdclk = 480000, .ratio = 25 },
{ .refclk = 38400, .cdclk = 556800, .ratio = 29 },
{ .refclk = 38400, .cdclk = 652800, .ratio = 34 },
{}
};
static const struct intel_cdclk_vals dg2_cdclk_table[] = {
{ .refclk = 38400, .cdclk = 163200, .divider = 2, .ratio = 34, .waveform = 0x8888 },
{ .refclk = 38400, .cdclk = 204000, .divider = 2, .ratio = 34, .waveform = 0x9248 },
{ .refclk = 38400, .cdclk = 244800, .divider = 2, .ratio = 34, .waveform = 0xa4a4 },
{ .refclk = 38400, .cdclk = 285600, .divider = 2, .ratio = 34, .waveform = 0xa54a },
{ .refclk = 38400, .cdclk = 326400, .divider = 2, .ratio = 34, .waveform = 0xaaaa },
{ .refclk = 38400, .cdclk = 367200, .divider = 2, .ratio = 34, .waveform = 0xad5a },
{ .refclk = 38400, .cdclk = 408000, .divider = 2, .ratio = 34, .waveform = 0xb6b6 },
{ .refclk = 38400, .cdclk = 448800, .divider = 2, .ratio = 34, .waveform = 0xdbb6 },
{ .refclk = 38400, .cdclk = 489600, .divider = 2, .ratio = 34, .waveform = 0xeeee },
{ .refclk = 38400, .cdclk = 530400, .divider = 2, .ratio = 34, .waveform = 0xf7de },
{ .refclk = 38400, .cdclk = 571200, .divider = 2, .ratio = 34, .waveform = 0xfefe },
{ .refclk = 38400, .cdclk = 612000, .divider = 2, .ratio = 34, .waveform = 0xfffe },
{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0xffff },
{ .refclk = 38400, .cdclk = 163200, .ratio = 34, .waveform = 0x8888 },
{ .refclk = 38400, .cdclk = 204000, .ratio = 34, .waveform = 0x9248 },
{ .refclk = 38400, .cdclk = 244800, .ratio = 34, .waveform = 0xa4a4 },
{ .refclk = 38400, .cdclk = 285600, .ratio = 34, .waveform = 0xa54a },
{ .refclk = 38400, .cdclk = 326400, .ratio = 34, .waveform = 0xaaaa },
{ .refclk = 38400, .cdclk = 367200, .ratio = 34, .waveform = 0xad5a },
{ .refclk = 38400, .cdclk = 408000, .ratio = 34, .waveform = 0xb6b6 },
{ .refclk = 38400, .cdclk = 448800, .ratio = 34, .waveform = 0xdbb6 },
{ .refclk = 38400, .cdclk = 489600, .ratio = 34, .waveform = 0xeeee },
{ .refclk = 38400, .cdclk = 530400, .ratio = 34, .waveform = 0xf7de },
{ .refclk = 38400, .cdclk = 571200, .ratio = 34, .waveform = 0xfefe },
{ .refclk = 38400, .cdclk = 612000, .ratio = 34, .waveform = 0xfffe },
{ .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0xffff },
{}
};
static const struct intel_cdclk_vals mtl_cdclk_table[] = {
{ .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 16, .waveform = 0xad5a },
{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 16, .waveform = 0xb6b6 },
{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16, .waveform = 0x0000 },
{ .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25, .waveform = 0x0000 },
{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29, .waveform = 0x0000 },
{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0x0000 },
{ .refclk = 38400, .cdclk = 172800, .ratio = 16, .waveform = 0xad5a },
{ .refclk = 38400, .cdclk = 192000, .ratio = 16, .waveform = 0xb6b6 },
{ .refclk = 38400, .cdclk = 307200, .ratio = 16, .waveform = 0x0000 },
{ .refclk = 38400, .cdclk = 480000, .ratio = 25, .waveform = 0x0000 },
{ .refclk = 38400, .cdclk = 556800, .ratio = 29, .waveform = 0x0000 },
{ .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0x0000 },
{}
};
static const struct intel_cdclk_vals lnl_cdclk_table[] = {
{ .refclk = 38400, .cdclk = 153600, .divider = 2, .ratio = 16, .waveform = 0xaaaa },
{ .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 16, .waveform = 0xad5a },
{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 16, .waveform = 0xb6b6 },
{ .refclk = 38400, .cdclk = 211200, .divider = 2, .ratio = 16, .waveform = 0xdbb6 },
{ .refclk = 38400, .cdclk = 230400, .divider = 2, .ratio = 16, .waveform = 0xeeee },
{ .refclk = 38400, .cdclk = 249600, .divider = 2, .ratio = 16, .waveform = 0xf7de },
{ .refclk = 38400, .cdclk = 268800, .divider = 2, .ratio = 16, .waveform = 0xfefe },
{ .refclk = 38400, .cdclk = 288000, .divider = 2, .ratio = 16, .waveform = 0xfffe },
{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16, .waveform = 0xffff },
{ .refclk = 38400, .cdclk = 330000, .divider = 2, .ratio = 25, .waveform = 0xdbb6 },
{ .refclk = 38400, .cdclk = 360000, .divider = 2, .ratio = 25, .waveform = 0xeeee },
{ .refclk = 38400, .cdclk = 390000, .divider = 2, .ratio = 25, .waveform = 0xf7de },
{ .refclk = 38400, .cdclk = 420000, .divider = 2, .ratio = 25, .waveform = 0xfefe },
{ .refclk = 38400, .cdclk = 450000, .divider = 2, .ratio = 25, .waveform = 0xfffe },
{ .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25, .waveform = 0xffff },
{ .refclk = 38400, .cdclk = 487200, .divider = 2, .ratio = 29, .waveform = 0xfefe },
{ .refclk = 38400, .cdclk = 522000, .divider = 2, .ratio = 29, .waveform = 0xfffe },
{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29, .waveform = 0xffff },
{ .refclk = 38400, .cdclk = 571200, .divider = 2, .ratio = 34, .waveform = 0xfefe },
{ .refclk = 38400, .cdclk = 612000, .divider = 2, .ratio = 34, .waveform = 0xfffe },
{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0xffff },
{ .refclk = 38400, .cdclk = 153600, .ratio = 16, .waveform = 0xaaaa },
{ .refclk = 38400, .cdclk = 172800, .ratio = 16, .waveform = 0xad5a },
{ .refclk = 38400, .cdclk = 192000, .ratio = 16, .waveform = 0xb6b6 },
{ .refclk = 38400, .cdclk = 211200, .ratio = 16, .waveform = 0xdbb6 },
{ .refclk = 38400, .cdclk = 230400, .ratio = 16, .waveform = 0xeeee },
{ .refclk = 38400, .cdclk = 249600, .ratio = 16, .waveform = 0xf7de },
{ .refclk = 38400, .cdclk = 268800, .ratio = 16, .waveform = 0xfefe },
{ .refclk = 38400, .cdclk = 288000, .ratio = 16, .waveform = 0xfffe },
{ .refclk = 38400, .cdclk = 307200, .ratio = 16, .waveform = 0xffff },
{ .refclk = 38400, .cdclk = 330000, .ratio = 25, .waveform = 0xdbb6 },
{ .refclk = 38400, .cdclk = 360000, .ratio = 25, .waveform = 0xeeee },
{ .refclk = 38400, .cdclk = 390000, .ratio = 25, .waveform = 0xf7de },
{ .refclk = 38400, .cdclk = 420000, .ratio = 25, .waveform = 0xfefe },
{ .refclk = 38400, .cdclk = 450000, .ratio = 25, .waveform = 0xfffe },
{ .refclk = 38400, .cdclk = 480000, .ratio = 25, .waveform = 0xffff },
{ .refclk = 38400, .cdclk = 487200, .ratio = 29, .waveform = 0xfefe },
{ .refclk = 38400, .cdclk = 522000, .ratio = 29, .waveform = 0xfffe },
{ .refclk = 38400, .cdclk = 556800, .ratio = 29, .waveform = 0xffff },
{ .refclk = 38400, .cdclk = 571200, .ratio = 34, .waveform = 0xfefe },
{ .refclk = 38400, .cdclk = 612000, .ratio = 34, .waveform = 0xfffe },
{ .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0xffff },
{}
};
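
The dropped .divider fields were redundant: refclk, ratio, and (on squash-capable platforms) the waveform already determine cdclk. A minimal standalone sketch of the squash arithmetic, assuming a CD2X divider of 2 and a 16-bit waveform whose set bits gate the clock (0 meaning squashing disabled):

#include <stdio.h>

/* Illustrative model: vco = refclk * ratio, halved by CD2X, then
 * scaled by the fraction of bits set in the squash waveform.
 */
static unsigned int popcount16(unsigned int w)
{
	unsigned int n = 0;

	for (; w; w &= w - 1)
		n++;
	return n;
}

static unsigned int effective_cdclk(unsigned int refclk, unsigned int ratio,
				    unsigned int waveform)
{
	unsigned int vco = refclk * ratio;	/* kHz */
	unsigned int bits = waveform ? popcount16(waveform) : 16;

	return (vco / 2) * bits / 16;
}

int main(void)
{
	/* mtl_cdclk_table entry: refclk 38400, ratio 16, waveform 0xad5a */
	printf("%u kHz\n", effective_cdclk(38400, 16, 0xad5a)); /* 172800 */
	return 0;
}
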
@@ -1901,9 +1900,9 @@ static bool pll_enable_wa_needed(struct drm_i915_private *dev_priv)
dev_priv->display.cdclk.hw.vco > 0;
}
static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
static u32 bxt_cdclk_ctl(struct drm_i915_private *i915,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
@@ -1911,6 +1910,38 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
u16 waveform;
u32 val;
waveform = cdclk_squash_waveform(i915, cdclk);
unsquashed_cdclk = DIV_ROUND_CLOSEST(cdclk * cdclk_squash_len,
cdclk_squash_divider(waveform));
val = bxt_cdclk_cd2x_div_sel(i915, unsquashed_cdclk, vco) |
bxt_cdclk_cd2x_pipe(i915, pipe);
/*
* Disable SSA Precharge when CD clock frequency < 500 MHz,
* enable otherwise.
*/
if ((IS_GEMINILAKE(i915) || IS_BROXTON(i915)) &&
cdclk >= 500000)
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
if (DISPLAY_VER(i915) >= 20)
val |= MDCLK_SOURCE_SEL_CDCLK_PLL;
else
val |= skl_cdclk_decimal(cdclk);
return val;
}
static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
u16 waveform;
if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 &&
!cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) {
if (dev_priv->display.cdclk.hw.vco != vco)
@@ -1926,29 +1957,10 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
waveform = cdclk_squash_waveform(dev_priv, cdclk);
unsquashed_cdclk = DIV_ROUND_CLOSEST(cdclk * cdclk_squash_len,
cdclk_squash_divider(waveform));
if (HAS_CDCLK_SQUASH(dev_priv))
dg2_cdclk_squash_program(dev_priv, waveform);
val = bxt_cdclk_cd2x_div_sel(dev_priv, unsquashed_cdclk, vco) |
bxt_cdclk_cd2x_pipe(dev_priv, pipe);
/*
* Disable SSA Precharge when CD clock frequency < 500 MHz,
* enable otherwise.
*/
if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
cdclk >= 500000)
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
if (DISPLAY_VER(dev_priv) >= 20)
val |= MDCLK_SOURCE_SEL_CDCLK_PLL;
else
val |= skl_cdclk_decimal(cdclk);
intel_de_write(dev_priv, CDCLK_CTL, val);
intel_de_write(dev_priv, CDCLK_CTL, bxt_cdclk_ctl(dev_priv, cdclk_config, pipe));
if (pipe != INVALID_PIPE)
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
@@ -2039,7 +2051,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
u32 cdctl, expected;
int cdclk, clock, vco;
int cdclk, vco;
intel_update_cdclk(dev_priv);
intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
@@ -2048,20 +2060,6 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass)
goto sanitize;
/* DPLL okay; verify the cdclock
*
* Some BIOS versions leave an incorrect decimal frequency value and
* set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
* so sanitize this register.
*/
cdctl = intel_de_read(dev_priv, CDCLK_CTL);
/*
* Let's ignore the pipe field, since BIOS could have configured the
* dividers both synching to an active pipe, or asynchronously
* (PIPE_NONE).
*/
cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
/* Make sure this is a legal cdclk value for the platform */
cdclk = bxt_calc_cdclk(dev_priv, dev_priv->display.cdclk.hw.cdclk);
if (cdclk != dev_priv->display.cdclk.hw.cdclk)
@@ -2072,24 +2070,21 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
if (vco != dev_priv->display.cdclk.hw.vco)
goto sanitize;
expected = skl_cdclk_decimal(cdclk);
/* Figure out what CD2X divider we should be using for this cdclk */
if (HAS_CDCLK_SQUASH(dev_priv))
clock = dev_priv->display.cdclk.hw.vco / 2;
else
clock = dev_priv->display.cdclk.hw.cdclk;
expected |= bxt_cdclk_cd2x_div_sel(dev_priv, clock,
dev_priv->display.cdclk.hw.vco);
/*
* Some BIOS versions leave an incorrect decimal frequency value and
* set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
* so sanitize this register.
*/
cdctl = intel_de_read(dev_priv, CDCLK_CTL);
expected = bxt_cdclk_ctl(dev_priv, &dev_priv->display.cdclk.hw, INVALID_PIPE);
/*
* Disable SSA Precharge when CD clock frequency < 500 MHz,
* enable otherwise.
* Let's ignore the pipe field, since BIOS could have configured the
* dividers both synching to an active pipe, or asynchronously
* (PIPE_NONE).
*/
if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
dev_priv->display.cdclk.hw.cdclk >= 500000)
expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
expected &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
if (cdctl == expected)
/* All well; nothing to sanitize */
@@ -3467,15 +3462,15 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
{
u32 freq;
if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
freq = dg1_rawclk(dev_priv);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL)
/*
* MTL always uses a 38.4 MHz rawclk. The bspec tells us
* "RAWCLK_FREQ defaults to the values for 38.4 and does
* not need to be programmed."
*/
freq = 38400;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
freq = dg1_rawclk(dev_priv);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))

@@ -42,6 +42,7 @@
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
@@ -846,6 +847,9 @@ intel_crt_detect(struct drm_connector *connector,
if (!intel_display_device_enabled(dev_priv))
return connector_status_disconnected;
if (!intel_display_driver_check_access(dev_priv))
return connector->status;
if (dev_priv->display.params.load_detect_test) {
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);
@@ -1069,6 +1073,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
} else {
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
}
intel_connector->base.polled = intel_connector->polled;
if (HAS_DDI(dev_priv)) {
assert_port_valid(dev_priv, PORT_E);

@@ -461,70 +461,6 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
1000 * adjusted_mode->crtc_htotal);
}
static int intel_mode_vblank_start(const struct drm_display_mode *mode)
{
int vblank_start = mode->crtc_vblank_start;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vblank_start = DIV_ROUND_UP(vblank_start, 2);
return vblank_start;
}
static void intel_crtc_vblank_evade_scanlines(struct intel_atomic_state *state,
struct intel_crtc *crtc,
int *min, int *max, int *vblank_start)
{
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *crtc_state;
const struct drm_display_mode *adjusted_mode;
/*
* During fastsets/etc. the transcoder is still
* running with the old timings at this point.
*
* TODO: maybe just use the active timings here?
*/
if (intel_crtc_needs_modeset(new_crtc_state))
crtc_state = new_crtc_state;
else
crtc_state = old_crtc_state;
adjusted_mode = &crtc_state->hw.adjusted_mode;
if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
/* timing changes should happen with VRR disabled */
drm_WARN_ON(state->base.dev, intel_crtc_needs_modeset(new_crtc_state) ||
new_crtc_state->update_m_n || new_crtc_state->update_lrr);
if (intel_vrr_is_push_sent(crtc_state))
*vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
else
*vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
} else {
*vblank_start = intel_mode_vblank_start(adjusted_mode);
}
/* FIXME needs to be calibrated sensibly */
*min = *vblank_start - intel_usecs_to_scanlines(adjusted_mode,
VBLANK_EVASION_TIME_US);
*max = *vblank_start - 1;
/*
* M/N and TRANS_VTOTAL are double buffered on the transcoder's
* undelayed vblank, so with seamless M/N and LRR we must evade
* both vblanks.
*
* DSB execution waits for the transcoder's undelayed vblank,
* hence we must kick off the commit before that.
*/
if (new_crtc_state->dsb || new_crtc_state->update_m_n || new_crtc_state->update_lrr)
*min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
}
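
The helpers removed here move to intel_vblank.c behind intel_vblank_evade_init()/intel_vblank_evade(); the window math is unchanged. A standalone sketch of the lower-bound conversion (assuming intel_usecs_to_scanlines() rounds up usecs * crtc_clock / (1000 * htotal) and a 100 us evasion time, per the removed code above):

#include <stdio.h>

#define VBLANK_EVASION_TIME_US 100	/* assumed value */

/* DIV_ROUND_UP(usecs * clock_khz, 1000 * htotal) */
static int usecs_to_scanlines(int usecs, int clock_khz, int htotal)
{
	return (usecs * clock_khz + 1000 * htotal - 1) / (1000 * htotal);
}

int main(void)
{
	/* 1080p60: 148500 kHz pixel clock, htotal 2200, vblank_start 1080 */
	int vblank_start = 1080;
	int lines = usecs_to_scanlines(VBLANK_EVASION_TIME_US, 148500, 2200);

	/* registers must not be written on scanlines 1073..1079 */
	printf("evade %d..%d\n", vblank_start - lines, vblank_start - 1);
	return 0;
}
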
/**
* intel_pipe_update_start() - start update of a set of display registers
* @state: the atomic state
@@ -542,14 +478,12 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
DEFINE_WAIT(wait);
struct intel_vblank_evade_ctx evade;
int scanline;
intel_psr_lock(new_crtc_state);
@@ -566,9 +500,7 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
if (intel_crtc_needs_vblank_work(new_crtc_state))
intel_crtc_vblank_work_init(new_crtc_state);
intel_crtc_vblank_evade_scanlines(state, crtc, &min, &max, &vblank_start);
if (min <= 0 || max <= 0)
goto irq_disable;
intel_vblank_evade_init(old_crtc_state, new_crtc_state, &evade);
if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
goto irq_disable;
@@ -582,58 +514,14 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
local_irq_disable();
crtc->debug.min_vbl = min;
crtc->debug.max_vbl = max;
crtc->debug.min_vbl = evade.min;
crtc->debug.max_vbl = evade.max;
trace_intel_pipe_update_start(crtc);
for (;;) {
/*
* prepare_to_wait() has a memory barrier, which guarantees
* other CPUs can see the task state update by the time we
* read the scanline.
*/
prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
scanline = intel_get_crtc_scanline(crtc);
if (scanline < min || scanline > max)
break;
if (!timeout) {
drm_err(&dev_priv->drm,
"Potential atomic update failure on pipe %c\n",
pipe_name(crtc->pipe));
break;
}
local_irq_enable();
timeout = schedule_timeout(timeout);
local_irq_disable();
}
finish_wait(wq, &wait);
scanline = intel_vblank_evade(&evade);
drm_crtc_vblank_put(&crtc->base);
/*
* On VLV/CHV DSI the scanline counter would appear to
* increment approx. 1/3 of a scanline before start of vblank.
* The registers still get latched at start of vblank however.
* This means we must not write any registers on the first
* line of vblank (since not the whole line is actually in
* vblank). And unfortunately we can't use the interrupt to
* wait here since it will fire too soon. We could use the
* frame start interrupt instead since it will fire after the
* critical scanline, but that would require more changes
* in the interrupt code. So for now we'll just do the nasty
* thing and poll for the bad scanline to pass us by.
*
* FIXME figure out if BXT+ DSI suffers from this as well
*/
while (need_vlv_dsi_wa && scanline == vblank_start)
scanline = intel_get_crtc_scanline(crtc);
crtc->debug.scanline_start = scanline;
crtc->debug.start_vbl_time = ktime_get();
crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);

@@ -22,6 +22,7 @@
#include "intel_frontbuffer.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_vblank.h"
#include "skl_watermark.h"
#include "gem/i915_gem_object.h"
@@ -47,12 +48,23 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
return base + plane_state->view.color_plane[0].offset;
}
static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
static u32 intel_cursor_position(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
bool early_tpt)
{
int x = plane_state->uapi.dst.x1;
int y = plane_state->uapi.dst.y1;
u32 pos = 0;
/*
* Formula from Bspec:
* MAX(-1 * <Cursor vertical size from CUR_CTL base on cursor mode
* select setting> + 1, CUR_POS Y Position - Update region Y position
*/
if (early_tpt)
y = max(-1 * drm_rect_height(&plane_state->uapi.dst) + 1,
y - crtc_state->psr2_su_area.y1);
if (x < 0) {
pos |= CURSOR_POS_X_SIGN;
x = -x;
@@ -274,7 +286,7 @@ static void i845_cursor_update_arm(struct intel_plane *plane,
size = CURSOR_HEIGHT(height) | CURSOR_WIDTH(width);
base = intel_cursor_base(plane_state);
pos = intel_cursor_position(plane_state);
pos = intel_cursor_position(crtc_state, plane_state, false);
}
/* On these chipsets we can only modify the base/size/stride
@@ -503,17 +515,24 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *i915 = to_i915(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
if (!crtc_state->enable_psr2_sel_fetch)
return;
if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0)
intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0) {
if (crtc_state->enable_psr2_su_region_et) {
u32 val = intel_cursor_position(crtc_state, plane_state,
true);
intel_de_write_fw(dev_priv, CURPOS_ERLY_TPT(pipe), val);
}
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
plane_state->ctl);
else
} else {
i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
}
}
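
A standalone check of the Bspec clamp implemented in intel_cursor_position() above, with invented numbers (64-line cursor at y = 100, selective update region starting at line 120):

#include <stdio.h>

/* y = max(-(cursor height) + 1, y - su_region_y1) */
static int early_tpt_y(int y, int cursor_h, int su_y1)
{
	int rel = y - su_y1;
	int floor = -cursor_h + 1;

	return rel > floor ? rel : floor;
}

int main(void)
{
	/* -20: negative, so CURSOR_POS_Y_SIGN gets set by the caller */
	printf("%d\n", early_tpt_y(100, 64, 120));
	return 0;
}
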
/* TODO: split into noarm+arm pair */
@@ -536,7 +555,7 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane,
fbc_ctl = CUR_FBC_EN | CUR_FBC_HEIGHT(height - 1);
base = intel_cursor_base(plane_state);
pos = intel_cursor_position(plane_state);
pos = intel_cursor_position(crtc_state, plane_state, false);
}
/*
@@ -647,12 +666,14 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
{
struct intel_plane *plane = to_intel_plane(_plane);
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->base.state);
struct intel_plane_state *new_plane_state;
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_crtc_state *new_crtc_state;
struct intel_vblank_evade_ctx evade;
int ret;
/*
@@ -745,13 +766,25 @@
*/
crtc_state->active_planes = new_crtc_state->active_planes;
/*
* Technically we should do a vblank evasion here to make
* sure all the cursor registers update on the same frame.
* For now just make sure the register writes happen as
* quickly as possible to minimize the race window.
*/
local_irq_disable();
intel_vblank_evade_init(crtc_state, crtc_state, &evade);
intel_psr_lock(crtc_state);
if (!drm_WARN_ON(&i915->drm, drm_crtc_vblank_get(&crtc->base))) {
/*
* TODO: maybe check if we're still in PSR
* and skip the vblank evasion entirely?
*/
intel_psr_wait_for_idle_locked(crtc_state);
local_irq_disable();
intel_vblank_evade(&evade);
drm_crtc_vblank_put(&crtc->base);
} else {
local_irq_disable();
}
if (new_plane_state->uapi.visible) {
intel_plane_update_noarm(plane, crtc_state, new_plane_state);
@@ -762,6 +795,8 @@
local_irq_enable();
intel_psr_unlock(crtc_state);
intel_plane_unpin_fb(old_plane_state);
out_free:

@@ -78,7 +78,7 @@ static void intel_cx0_program_msgbus_timer(struct intel_encoder *encoder)
for_each_cx0_lane_in_mask(INTEL_CX0_BOTH_LANES, lane)
intel_de_rmw(i915,
XELPDP_PORT_MSGBUS_TIMER(encoder->port, lane),
XELPDP_PORT_MSGBUS_TIMER(i915, encoder->port, lane),
XELPDP_PORT_MSGBUS_TIMER_VAL_MASK,
XELPDP_PORT_MSGBUS_TIMER_VAL);
}
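
Every XELPDP_* register macro in this file now takes the device pointer, presumably so the macro can resolve a per-platform mmio base when computing the offset. A hypothetical sketch of the pattern (names and offsets invented for illustration):

struct fake_i915 {
	int has_relocated_pica;
};

/* Hypothetical: same logical register, different per-platform base. */
#define FAKE_PICA_BASE(i915) \
	((i915)->has_relocated_pica ? 0x200000u : 0x64000u)
#define FAKE_PORT_REG(i915, port) \
	(FAKE_PICA_BASE(i915) + (unsigned int)(port) * 0x100u)
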
@@ -117,7 +117,7 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_w
static void intel_clear_response_ready_flag(struct drm_i915_private *i915,
enum port port, int lane)
{
intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET);
}
@@ -125,10 +125,10 @@ static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, i
{
enum phy phy = intel_port_to_phy(i915, port);
intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_RESET);
if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_RESET,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_err_once(&i915->drm, "Failed to bring PHY %c to idle.\n", phy_name(phy));
@@ -144,7 +144,7 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
enum phy phy = intel_port_to_phy(i915, port);
if (__intel_de_wait_for_register(i915,
XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
XELPDP_PORT_P2M_RESPONSE_READY,
XELPDP_PORT_P2M_RESPONSE_READY,
XELPDP_MSGBUS_TIMEOUT_FAST_US,
@@ -152,7 +152,7 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
phy_name(phy), *val);
if (!(intel_de_read(i915, XELPDP_PORT_MSGBUS_TIMER(port, lane)) &
if (!(intel_de_read(i915, XELPDP_PORT_MSGBUS_TIMER(i915, port, lane)) &
XELPDP_PORT_MSGBUS_TIMER_TIMED_OUT))
drm_dbg_kms(&i915->drm,
"PHY %c Hardware did not detect a timeout\n",
@@ -186,7 +186,7 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
int ack;
u32 val;
if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
@@ -195,7 +195,7 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
return -ETIMEDOUT;
}
intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING |
XELPDP_PORT_M2P_COMMAND_READ |
XELPDP_PORT_M2P_ADDRESS(addr));
@@ -253,7 +253,7 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
int ack;
u32 val;
if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
@@ -262,14 +262,14 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
return -ETIMEDOUT;
}
intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING |
(committed ? XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED :
XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) |
XELPDP_PORT_M2P_DATA(data) |
XELPDP_PORT_M2P_ADDRESS(addr));
if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
@@ -282,7 +282,7 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
if (ack < 0)
return ack;
} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)) &
} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane)) &
XELPDP_PORT_P2M_ERROR_SET)) {
drm_dbg_kms(&i915->drm,
"PHY %c Error occurred during write command.\n", phy_name(phy));
@@ -2096,13 +2096,54 @@ int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state,
return intel_c20pll_calc_state(crtc_state, encoder);
}
static bool intel_c20_use_mplla(u32 clock)
static bool intel_c20phy_use_mpllb(const struct intel_c20pll_state *state)
{
/* 10G and 20G rates use MPLLA */
if (clock == 1000000 || clock == 2000000)
return true;
return state->tx[0] & C20_PHY_USE_MPLLB;
}
return false;
static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
const struct intel_c20pll_state *pll_state)
{
unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
unsigned int multiplier, refclk = 38400;
unsigned int tx_clk_div;
unsigned int ref_clk_mpllb_div;
unsigned int fb_clk_div4_en;
unsigned int ref, vco;
unsigned int tx_rate_mult;
unsigned int tx_rate = REG_FIELD_GET(C20_PHY_TX_RATE, pll_state->tx[0]);
if (intel_c20phy_use_mpllb(pll_state)) {
tx_rate_mult = 1;
frac_en = REG_FIELD_GET(C20_MPLLB_FRACEN, pll_state->mpllb[6]);
frac_quot = pll_state->mpllb[8];
frac_rem = pll_state->mpllb[9];
frac_den = pll_state->mpllb[7];
multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mpllb[0]);
tx_clk_div = REG_FIELD_GET(C20_MPLLB_TX_CLK_DIV_MASK, pll_state->mpllb[0]);
ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mpllb[6]);
fb_clk_div4_en = 0;
} else {
tx_rate_mult = 2;
frac_en = REG_FIELD_GET(C20_MPLLA_FRACEN, pll_state->mplla[6]);
frac_quot = pll_state->mplla[8];
frac_rem = pll_state->mplla[9];
frac_den = pll_state->mplla[7];
multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mplla[0]);
tx_clk_div = REG_FIELD_GET(C20_MPLLA_TX_CLK_DIV_MASK, pll_state->mplla[1]);
ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mplla[6]);
fb_clk_div4_en = REG_FIELD_GET(C20_FB_CLK_DIV4_EN, pll_state->mplla[0]);
}
if (frac_en)
frac = frac_quot + DIV_ROUND_CLOSEST(frac_rem, frac_den);
else
frac = 0;
ref = DIV_ROUND_CLOSEST(refclk * (1 << (1 + fb_clk_div4_en)), 1 << ref_clk_mpllb_div);
vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(ref, (multiplier << (17 - 2)) + frac) >> 17, 10);
return vco << tx_rate_mult >> tx_clk_div >> tx_rate;
}
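
The fixed-point math above reads as vco = ref * (multiplier / 4 + frac / 2^17) / 10, with ref = refclk * 2^(1 + fb_clk_div4_en) / 2^ref_clk_mpllb_div; the port clock is then shifted by the rate/divider exponents. A standalone sketch with invented field values (illustrative only, not taken from real PLL tables):

#include <stdio.h>

static unsigned long long calc_port_clock(unsigned long long refclk,
					  unsigned int multiplier,
					  unsigned int frac,
					  unsigned int div4_en,
					  unsigned int ref_div,
					  unsigned int tx_rate_mult,
					  unsigned int tx_clk_div,
					  unsigned int tx_rate)
{
	unsigned long long ref, vco;

	ref = refclk * (1u << (1 + div4_en)) >> ref_div;
	/* vco = ref * (multiplier / 4 + frac / 2^17) / 10 */
	vco = (ref * (((unsigned long long)multiplier << 15) + frac) >> 17) / 10;
	return vco << tx_rate_mult >> tx_clk_div >> tx_rate;
}

int main(void)
{
	/* invented fields chosen to land on a round 8.1 GHz VCO */
	printf("%llu kHz\n", calc_port_clock(38400, 8437, 16384, 0, 1, 1, 0, 1));
	return 0;
}
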
static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
@@ -2138,7 +2179,7 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
PHY_C20_A_CMN_CNTX_CFG(i));
}
if (pll_state->tx[0] & C20_PHY_USE_MPLLB) {
if (intel_c20phy_use_mpllb(pll_state)) {
/* MPLLB configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
if (cntx)
@@ -2160,6 +2201,8 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
}
}
pll_state->clock = intel_c20pll_calc_port_clock(encoder, pll_state);
intel_cx0_phy_transaction_end(encoder, wakeref);
}
@@ -2174,12 +2217,12 @@ void intel_c20pll_dump_hw_state(struct drm_i915_private *i915,
drm_dbg_kms(&i915->drm, "cmn[0] = 0x%.4x, cmn[1] = 0x%.4x, cmn[2] = 0x%.4x, cmn[3] = 0x%.4x\n",
hw_state->cmn[0], hw_state->cmn[1], hw_state->cmn[2], hw_state->cmn[3]);
if (intel_c20_use_mplla(hw_state->clock)) {
for (i = 0; i < ARRAY_SIZE(hw_state->mplla); i++)
drm_dbg_kms(&i915->drm, "mplla[%d] = 0x%.4x\n", i, hw_state->mplla[i]);
} else {
if (intel_c20phy_use_mpllb(hw_state)) {
for (i = 0; i < ARRAY_SIZE(hw_state->mpllb); i++)
drm_dbg_kms(&i915->drm, "mpllb[%d] = 0x%.4x\n", i, hw_state->mpllb[i]);
} else {
for (i = 0; i < ARRAY_SIZE(hw_state->mplla); i++)
drm_dbg_kms(&i915->drm, "mplla[%d] = 0x%.4x\n", i, hw_state->mplla[i]);
}
}
@@ -2326,18 +2369,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
}
/* 3.3 mpllb or mplla configuration */
if (intel_c20_use_mplla(clock)) {
for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
if (cntx)
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
PHY_C20_A_MPLLA_CNTX_CFG(i),
pll_state->mplla[i]);
else
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
PHY_C20_B_MPLLA_CNTX_CFG(i),
pll_state->mplla[i]);
}
} else {
if (intel_c20phy_use_mpllb(pll_state)) {
for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
if (cntx)
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
@@ -2348,6 +2380,17 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
PHY_C20_B_MPLLB_CNTX_CFG(i),
pll_state->mpllb[i]);
}
} else {
for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
if (cntx)
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
PHY_C20_A_MPLLA_CNTX_CFG(i),
pll_state->mplla[i]);
else
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
PHY_C20_B_MPLLA_CNTX_CFG(i),
pll_state->mplla[i]);
}
}
/* 4. Program custom width to match the link protocol */
@@ -2408,51 +2451,6 @@ static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
return tmpclk;
}
static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
const struct intel_c20pll_state *pll_state)
{
unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
unsigned int multiplier, refclk = 38400;
unsigned int tx_clk_div;
unsigned int ref_clk_mpllb_div;
unsigned int fb_clk_div4_en;
unsigned int ref, vco;
unsigned int tx_rate_mult;
unsigned int tx_rate = REG_FIELD_GET(C20_PHY_TX_RATE, pll_state->tx[0]);
if (pll_state->tx[0] & C20_PHY_USE_MPLLB) {
tx_rate_mult = 1;
frac_en = REG_FIELD_GET(C20_MPLLB_FRACEN, pll_state->mpllb[6]);
frac_quot = pll_state->mpllb[8];
frac_rem = pll_state->mpllb[9];
frac_den = pll_state->mpllb[7];
multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mpllb[0]);
tx_clk_div = REG_FIELD_GET(C20_MPLLB_TX_CLK_DIV_MASK, pll_state->mpllb[0]);
ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mpllb[6]);
fb_clk_div4_en = 0;
} else {
tx_rate_mult = 2;
frac_en = REG_FIELD_GET(C20_MPLLA_FRACEN, pll_state->mplla[6]);
frac_quot = pll_state->mplla[8];
frac_rem = pll_state->mplla[9];
frac_den = pll_state->mplla[7];
multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mplla[0]);
tx_clk_div = REG_FIELD_GET(C20_MPLLA_TX_CLK_DIV_MASK, pll_state->mplla[1]);
ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mplla[6]);
fb_clk_div4_en = REG_FIELD_GET(C20_FB_CLK_DIV4_EN, pll_state->mplla[0]);
}
if (frac_en)
frac = frac_quot + DIV_ROUND_CLOSEST(frac_rem, frac_den);
else
frac = 0;
ref = DIV_ROUND_CLOSEST(refclk * (1 << (1 + fb_clk_div4_en)), 1 << ref_clk_mpllb_div);
vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(ref, (multiplier << (17 - 2)) + frac) >> 17, 10);
return vco << tx_rate_mult >> tx_clk_div >> tx_rate;
}
static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
bool lane_reversal)
@@ -2460,7 +2458,8 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u32 val = 0;
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port), XELPDP_PORT_REVERSAL,
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port),
XELPDP_PORT_REVERSAL,
lane_reversal ? XELPDP_PORT_REVERSAL : 0);
if (lane_reversal)
@@ -2481,7 +2480,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
else
val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLA |
XELPDP_SSC_ENABLE_PLLB, val);
@@ -2514,15 +2513,16 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
u8 lane_mask, u8 state)
{
enum phy phy = intel_port_to_phy(i915, port);
i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(i915, port);
int lane;
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
intel_de_rmw(i915, buf_ctl2_reg,
intel_cx0_get_powerdown_state(INTEL_CX0_BOTH_LANES, XELPDP_LANE_POWERDOWN_NEW_STATE_MASK),
intel_cx0_get_powerdown_state(lane_mask, state));
/* Wait for pending transactions.*/
for_each_cx0_lane_in_mask(lane_mask, lane)
if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
@@ -2531,12 +2531,12 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
intel_cx0_bus_reset(i915, port, lane);
}
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
intel_de_rmw(i915, buf_ctl2_reg,
intel_cx0_get_powerdown_update(INTEL_CX0_BOTH_LANES),
intel_cx0_get_powerdown_update(lane_mask));
/* Update Timeout Value */
if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
if (__intel_de_wait_for_register(i915, buf_ctl2_reg,
intel_cx0_get_powerdown_update(lane_mask), 0,
XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
@@ -2545,10 +2545,10 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
static void intel_cx0_setup_powerdown(struct drm_i915_private *i915, enum port port)
{
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port),
XELPDP_POWER_STATE_READY_MASK,
XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(port),
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(i915, port),
XELPDP_POWER_STATE_ACTIVE_MASK |
XELPDP_PLL_LANE_STAGGERING_DELAY_MASK,
XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) |
@@ -2593,27 +2593,27 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
XELPDP_LANE_PHY_CURRENT_STATUS(1))
: XELPDP_LANE_PHY_CURRENT_STATUS(0);
if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(port),
if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(i915, port),
XELPDP_PORT_BUF_SOC_PHY_READY,
XELPDP_PORT_BUF_SOC_PHY_READY,
XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset,
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset,
lane_pipe_reset);
if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(i915, port),
lane_phy_current_status, lane_phy_current_status,
XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(port),
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
intel_cx0_get_pclk_refclk_request(owned_lane_mask),
intel_cx0_get_pclk_refclk_request(lane_mask));
if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(port),
if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
intel_cx0_get_pclk_refclk_ack(lane_mask),
XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
@@ -2624,9 +2624,10 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
CX0_P2_STATE_RESET);
intel_cx0_setup_powerdown(i915, port);
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset, 0);
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset, 0);
if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(port), lane_phy_current_status,
if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(i915, port),
lane_phy_current_status,
XELPDP_PORT_RESET_END_TIMEOUT))
drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dms.\n",
phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT);
@@ -2761,12 +2762,12 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
* 9. Set PORT_CLOCK_CTL register PCLK PLL Request
* LN<Lane for maxPCLK> to "1" to enable PLL.
*/
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES),
intel_cx0_get_pclk_pll_request(maxpclk_lane));
/* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
intel_cx0_get_pclk_pll_ack(maxpclk_lane),
XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
@ -2786,7 +2787,7 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u32 clock;
u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port));
clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
@ -2839,11 +2840,11 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
*/
val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(i915, crtc_state->port_clock));
val |= XELPDP_FORWARD_CLOCK_UNGATE;
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, val);
/* 2. Read back PORT_CLOCK_CTL REGISTER */
val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port));
/*
* 3. Follow the Display Voltage Frequency Switching - Sequence
@ -2854,10 +2855,10 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
* 4. Set PORT_CLOCK_CTL register TBT CLOCK Request to "1" to enable PLL.
*/
val |= XELPDP_TBT_CLOCK_REQUEST;
intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), val);
intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), val);
/* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */
if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_TBT_CLOCK_ACK,
XELPDP_TBT_CLOCK_ACK,
100, 0, NULL))
@ -2909,7 +2910,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
* 3. Set PORT_CLOCK_CTL register PCLK PLL Request LN<Lane for maxPCLK>
* to "0" to disable PLL.
*/
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES) |
intel_cx0_get_pclk_refclk_request(INTEL_CX0_BOTH_LANES), 0);
@ -2919,7 +2920,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
/*
* 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0".
*/
if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
@ -2932,9 +2933,9 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
*/
/* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_DDI_CLOCK_SELECT_MASK, 0);
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_FORWARD_CLOCK_UNGATE, 0);
intel_cx0_phy_transaction_end(encoder, wakeref);
@ -2953,11 +2954,11 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
/*
* 2. Set PORT_CLOCK_CTL register TBT CLOCK Request to "0" to disable PLL.
*/
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_TBT_CLOCK_REQUEST, 0);
/* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */
if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL))
drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n",
encoder->base.base.id, encoder->base.name, phy_name(phy));
@ -2970,7 +2971,7 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
/*
* 5. Program PORT CLOCK CTRL register to disable and gate clocks
*/
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
XELPDP_DDI_CLOCK_SELECT_MASK |
XELPDP_FORWARD_CLOCK_UNGATE, 0);
@ -2997,7 +2998,7 @@ intel_mtl_port_pll_type(struct intel_encoder *encoder,
* TODO: Determine the PLL type from the SW state, once MTL PLL
* handling is done via the standard shared DPLL framework.
*/
u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port));
u32 clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK ||
@ -3016,6 +3017,9 @@ static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
const struct intel_c10pll_state *mpllb_sw_state = &state->cx0pll_state.c10;
int i;
if (intel_crtc_needs_fastset(state))
return;
for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) {
u8 expected = mpllb_sw_state->pll[i];
@ -3067,10 +3071,15 @@ static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20;
bool sw_use_mpllb = mpll_sw_state->tx[0] & C20_PHY_USE_MPLLB;
bool hw_use_mpllb = mpll_hw_state->tx[0] & C20_PHY_USE_MPLLB;
bool sw_use_mpllb = intel_c20phy_use_mpllb(mpll_sw_state);
bool hw_use_mpllb = intel_c20phy_use_mpllb(mpll_hw_state);
int i;
I915_STATE_WARN(i915, mpll_hw_state->clock != mpll_sw_state->clock,
"[CRTC:%d:%s] mismatch in C20: Register CLOCK (expected %d, found %d)",
crtc->base.base.id, crtc->base.name,
mpll_sw_state->clock, mpll_hw_state->clock);
I915_STATE_WARN(i915, sw_use_mpllb != hw_use_mpllb,
"[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)",
crtc->base.base.id, crtc->base.name,

View file

@ -7,16 +7,39 @@
#define __INTEL_CX0_PHY_REGS_H__
#include "i915_reg_defs.h"
#include "intel_display_limits.h"
/*
* Wrapper macro to convert from port number to the index used in some of the
* registers. For Display version 20 and above, it converts the port number to a
* single range, starting with the TC offsets. When used together with
* _PICK_EVEN_2RANGES(idx, PORT_TC1, ...), this single range will be the second
* range. Example:
*
* PORT_TC1 -> PORT_TC1
* PORT_TC2 -> PORT_TC2
* PORT_TC3 -> PORT_TC3
* PORT_TC4 -> PORT_TC4
* PORT_A -> PORT_TC4 + 1
* PORT_B -> PORT_TC4 + 2
* ...
*/
#define __xe2lpd_port_idx(port) \
(port >= PORT_TC1 ? port : PORT_TC4 + 1 + port - PORT_A)
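A minimal standalone sketch of the remap above, illustrative only and not part of this header; the enum values are hypothetical (combo ports before the TC ports, as in i915's enum port — the real values live in intel_display_limits.h):

#include <stdio.h>

/* hypothetical stand-in for i915's enum port */
enum port { PORT_A, PORT_B, PORT_TC1, PORT_TC2, PORT_TC3, PORT_TC4 };

/* mirrors __xe2lpd_port_idx(): TC ports keep their own index, combo
 * ports are appended after PORT_TC4 */
static int xe2lpd_port_idx(enum port port)
{
	return port >= PORT_TC1 ? port : PORT_TC4 + 1 + port - PORT_A;
}

int main(void)
{
	printf("%d %d %d\n",
	       xe2lpd_port_idx(PORT_TC1), /* PORT_TC1 (2) */
	       xe2lpd_port_idx(PORT_A),   /* PORT_TC4 + 1 (6) */
	       xe2lpd_port_idx(PORT_B));  /* PORT_TC4 + 2 (7) */
	return 0;
}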
#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A 0x64040
#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B 0x64140
#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1 0x16F240
#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2 0x16F440
#define XELPDP_PORT_M2P_MSGBUS_CTL(port, lane) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
#define _XELPDP_PORT_M2P_MSGBUS_CTL(idx, lane) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2) + (lane) * 4)
#define XELPDP_PORT_M2P_MSGBUS_CTL(i915__, port, lane) \
(DISPLAY_VER(i915__) >= 20 ? \
_XELPDP_PORT_M2P_MSGBUS_CTL(__xe2lpd_port_idx(port), lane) : \
_XELPDP_PORT_M2P_MSGBUS_CTL(port, lane))
#define XELPDP_PORT_M2P_TRANSACTION_PENDING REG_BIT(31)
#define XELPDP_PORT_M2P_COMMAND_TYPE_MASK REG_GENMASK(30, 27)
#define XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x1)
@ -27,11 +50,16 @@
#define XELPDP_PORT_M2P_TRANSACTION_RESET REG_BIT(15)
#define XELPDP_PORT_M2P_ADDRESS_MASK REG_GENMASK(11, 0)
#define XELPDP_PORT_M2P_ADDRESS(val) REG_FIELD_PREP(XELPDP_PORT_M2P_ADDRESS_MASK, val)
#define XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
#define _XELPDP_PORT_P2M_MSGBUS_STATUS(idx, lane) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \
_XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2) + (lane) * 4 + 8)
#define XELPDP_PORT_P2M_MSGBUS_STATUS(i915__, port, lane) \
(DISPLAY_VER(i915__) >= 20 ? \
_XELPDP_PORT_P2M_MSGBUS_STATUS(__xe2lpd_port_idx(port), lane) : \
_XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane))
#define XELPDP_PORT_P2M_RESPONSE_READY REG_BIT(31)
#define XELPDP_PORT_P2M_COMMAND_TYPE_MASK REG_GENMASK(30, 27)
#define XELPDP_PORT_P2M_COMMAND_READ_ACK 0x4
@ -54,11 +82,15 @@
#define _XELPDP_PORT_BUF_CTL1_LN0_B 0x64104
#define _XELPDP_PORT_BUF_CTL1_LN0_USBC1 0x16F200
#define _XELPDP_PORT_BUF_CTL1_LN0_USBC2 0x16F400
#define XELPDP_PORT_BUF_CTL1(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
#define _XELPDP_PORT_BUF_CTL1(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_BUF_CTL1_LN0_A, \
_XELPDP_PORT_BUF_CTL1_LN0_B, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC2))
#define XELPDP_PORT_BUF_CTL1(i915__, port) \
(DISPLAY_VER(i915__) >= 20 ? \
_XELPDP_PORT_BUF_CTL1(__xe2lpd_port_idx(port)) : \
_XELPDP_PORT_BUF_CTL1(port))
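For illustration, a worked expansion of the new wrapper on a display version 20 device, per the remap comment at the top of this header:

/*
 * XELPDP_PORT_BUF_CTL1(i915, PORT_A)
 *	-> _XELPDP_PORT_BUF_CTL1(__xe2lpd_port_idx(PORT_A))
 *	-> _XELPDP_PORT_BUF_CTL1(PORT_TC4 + 1)
 *
 * i.e. on Xe2 the combo ports index past the TC range, while earlier
 * platforms fall through to the old direct port indexing.
 */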
#define XELPDP_PORT_BUF_D2D_LINK_ENABLE REG_BIT(29)
#define XELPDP_PORT_BUF_D2D_LINK_STATE REG_BIT(28)
#define XELPDP_PORT_BUF_SOC_PHY_READY REG_BIT(24)
@ -75,12 +107,15 @@
#define XELPDP_PORT_WIDTH_MASK REG_GENMASK(3, 1)
#define XELPDP_PORT_WIDTH(val) REG_FIELD_PREP(XELPDP_PORT_WIDTH_MASK, val)
#define XELPDP_PORT_BUF_CTL2(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
#define _XELPDP_PORT_BUF_CTL2(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_BUF_CTL1_LN0_A, \
_XELPDP_PORT_BUF_CTL1_LN0_B, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC2) + 4)
#define XELPDP_PORT_BUF_CTL2(i915__, port) \
(DISPLAY_VER(i915__) >= 20 ? \
_XELPDP_PORT_BUF_CTL2(__xe2lpd_port_idx(port)) : \
_XELPDP_PORT_BUF_CTL2(port))
#define XELPDP_LANE_PIPE_RESET(lane) _PICK(lane, REG_BIT(31), REG_BIT(30))
#define XELPDP_LANE_PHY_CURRENT_STATUS(lane) _PICK(lane, REG_BIT(29), REG_BIT(28))
#define XELPDP_LANE_POWERDOWN_UPDATE(lane) _PICK(lane, REG_BIT(25), REG_BIT(24))
@ -95,11 +130,15 @@
#define XELPDP_POWER_STATE_READY_MASK REG_GENMASK(7, 4)
#define XELPDP_POWER_STATE_READY(val) REG_FIELD_PREP(XELPDP_POWER_STATE_READY_MASK, val)
#define XELPDP_PORT_BUF_CTL3(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
#define _XELPDP_PORT_BUF_CTL3(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_BUF_CTL1_LN0_A, \
_XELPDP_PORT_BUF_CTL1_LN0_B, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
_XELPDP_PORT_BUF_CTL1_LN0_USBC2) + 8)
#define XELPDP_PORT_BUF_CTL3(i915__, port) \
(DISPLAY_VER(i915__) >= 20 ? \
_XELPDP_PORT_BUF_CTL3(__xe2lpd_port_idx(port)) : \
_XELPDP_PORT_BUF_CTL3(port))
#define XELPDP_PLL_LANE_STAGGERING_DELAY_MASK REG_GENMASK(15, 8)
#define XELPDP_PLL_LANE_STAGGERING_DELAY(val) REG_FIELD_PREP(XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, val)
#define XELPDP_POWER_STATE_ACTIVE_MASK REG_GENMASK(3, 0)
@ -114,11 +153,15 @@
#define _XELPDP_PORT_MSGBUS_TIMER_LN0_B 0x641d8
#define _XELPDP_PORT_MSGBUS_TIMER_LN0_USBC1 0x16f258
#define _XELPDP_PORT_MSGBUS_TIMER_LN0_USBC2 0x16f458
#define XELPDP_PORT_MSGBUS_TIMER(port, lane) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
#define _XELPDP_PORT_MSGBUS_TIMER(port, lane) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
_XELPDP_PORT_MSGBUS_TIMER_LN0_A, \
_XELPDP_PORT_MSGBUS_TIMER_LN0_B, \
_XELPDP_PORT_MSGBUS_TIMER_LN0_USBC1, \
_XELPDP_PORT_MSGBUS_TIMER_LN0_USBC2) + (lane) * 4)
#define XELPDP_PORT_MSGBUS_TIMER(i915__, port, lane) \
(DISPLAY_VER(i915__) >= 20 ? \
_XELPDP_PORT_MSGBUS_TIMER(__xe2lpd_port_idx(port), lane) : \
_XELPDP_PORT_MSGBUS_TIMER(port, lane))
#define XELPDP_PORT_MSGBUS_TIMER_TIMED_OUT REG_BIT(31)
#define XELPDP_PORT_MSGBUS_TIMER_VAL_MASK REG_GENMASK(23, 0)
#define XELPDP_PORT_MSGBUS_TIMER_VAL REG_FIELD_PREP(XELPDP_PORT_MSGBUS_TIMER_VAL_MASK, 0xa000)
@ -127,11 +170,15 @@
#define _XELPDP_PORT_CLOCK_CTL_B 0x641E0
#define _XELPDP_PORT_CLOCK_CTL_USBC1 0x16F260
#define _XELPDP_PORT_CLOCK_CTL_USBC2 0x16F460
#define XELPDP_PORT_CLOCK_CTL(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
#define _XELPDP_PORT_CLOCK_CTL(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_CLOCK_CTL_A, \
_XELPDP_PORT_CLOCK_CTL_B, \
_XELPDP_PORT_CLOCK_CTL_USBC1, \
_XELPDP_PORT_CLOCK_CTL_USBC2))
#define XELPDP_PORT_CLOCK_CTL(i915__, port) \
(DISPLAY_VER(i915__) >= 20 ? \
_XELPDP_PORT_CLOCK_CTL(__xe2lpd_port_idx(port)) : \
_XELPDP_PORT_CLOCK_CTL(port))
#define XELPDP_LANE_PCLK_PLL_REQUEST(lane) REG_BIT(31 - ((lane) * 4))
#define XELPDP_LANE_PCLK_PLL_ACK(lane) REG_BIT(30 - ((lane) * 4))
#define XELPDP_LANE_PCLK_REFCLK_REQUEST(lane) REG_BIT(29 - ((lane) * 4))

View file

@ -178,7 +178,7 @@ static void mtl_wait_ddi_buf_idle(struct drm_i915_private *i915, enum port port)
int ret;
/* FIXME: find out why Bspec's 100us timeout is too short */
ret = wait_for_us((intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) &
ret = wait_for_us((intel_de_read(i915, XELPDP_PORT_BUF_CTL1(i915, port)) &
XELPDP_PORT_BUF_PHY_IDLE), 10000);
if (ret)
drm_err(&i915->drm, "Timeout waiting for DDI BUF %c to get idle\n",
@ -226,7 +226,9 @@ static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
}
if (DISPLAY_VER(dev_priv) >= 14)
ret = _wait_for(!(intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_PORT_BUF_PHY_IDLE),
ret = _wait_for(!(intel_de_read(dev_priv,
XELPDP_PORT_BUF_CTL1(dev_priv, port)) &
XELPDP_PORT_BUF_PHY_IDLE),
timeout_us, 10, 10);
else
ret = _wait_for(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE),
@ -2429,13 +2431,22 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
i915_reg_t reg;
u32 set_bits, wait_bits;
intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port), 0,
XELPDP_PORT_BUF_D2D_LINK_ENABLE);
if (DISPLAY_VER(dev_priv) >= 20) {
reg = DDI_BUF_CTL(port);
set_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
} else {
reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
set_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
}
if (wait_for_us((intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) &
XELPDP_PORT_BUF_D2D_LINK_STATE), 100)) {
drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for PORT_BUF_CTL %c\n",
intel_de_rmw(dev_priv, reg, 0, set_bits);
if (wait_for_us(intel_de_read(dev_priv, reg) & wait_bits, 100)) {
drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
}
@ -2448,7 +2459,7 @@ static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
enum port port = encoder->port;
u32 val;
val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(i915, port));
val &= ~XELPDP_PORT_WIDTH_MASK;
val |= XELPDP_PORT_WIDTH(mtl_get_port_width(crtc_state->lane_count));
@ -2461,7 +2472,7 @@ static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL)
val |= XELPDP_PORT_REVERSAL;
intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
intel_de_write(i915, XELPDP_PORT_BUF_CTL1(i915, port), val);
}
static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder)
@ -2472,7 +2483,7 @@ static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder)
val = intel_tc_port_in_tbt_alt_mode(dig_port) ?
XELPDP_PORT_BUF_IO_SELECT_TBT : 0;
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port),
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, val);
}
@ -2898,13 +2909,22 @@ mtl_ddi_disable_d2d_link(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
i915_reg_t reg;
u32 clr_bits, wait_bits;
intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port),
XELPDP_PORT_BUF_D2D_LINK_ENABLE, 0);
if (DISPLAY_VER(dev_priv) >= 20) {
reg = DDI_BUF_CTL(port);
clr_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
} else {
reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
clr_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
}
if (wait_for_us(!(intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) &
XELPDP_PORT_BUF_D2D_LINK_STATE), 100))
drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for PORT_BUF_CTL %c\n",
intel_de_rmw(dev_priv, reg, clr_bits, 0);
if (wait_for_us(!(intel_de_read(dev_priv, reg) & wait_bits), 100))
drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
@ -3038,7 +3058,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
/* De-select Thunderbolt */
if (DISPLAY_VER(dev_priv) >= 14)
intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(encoder->port),
intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, 0);
}
@ -3319,10 +3339,13 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL)
port_buf |= XELPDP_PORT_REVERSAL;
intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port),
intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port),
XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf);
buf_ctl |= DDI_PORT_WIDTH(lane_count);
if (DISPLAY_VER(dev_priv) >= 20)
buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
} else if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) {
drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
@ -3543,6 +3566,9 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
/* 6.i Configure and enable DDI_CTL_DE to start sending valid data to port slice */
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
if (DISPLAY_VER(dev_priv) >= 20)
intel_dp->DP |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
@ -3941,11 +3967,11 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
if (DISPLAY_VER(dev_priv) >= 8)
bdw_get_trans_port_sync_config(pipe_config);
intel_psr_get_config(encoder, pipe_config);
intel_read_dp_sdp(encoder, pipe_config, HDMI_PACKET_TYPE_GAMUT_METADATA);
intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC);
intel_psr_get_config(encoder, pipe_config);
intel_audio_codec_get_config(encoder, pipe_config);
}
@ -5117,6 +5143,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
encoder->suspend_complete = intel_ddi_tc_encoder_suspend_complete;
encoder->shutdown_complete = intel_ddi_tc_encoder_shutdown_complete;
dig_port->lock = intel_tc_port_lock;
dig_port->unlock = intel_tc_port_unlock;
if (intel_tc_port_init(dig_port, is_legacy) < 0)
goto err;
}

View file

@ -104,6 +104,7 @@
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_sdvo.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
@ -2706,6 +2707,15 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
*/
intel_de_write(dev_priv, PIPESRC(pipe),
PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
if (!crtc_state->enable_psr2_su_region_et)
return;
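/* With PSR2 early transport enabled, also program the selective update
 * region size into PIPE_SRCSZ_ERLY_TPT. */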
width = drm_rect_width(&crtc_state->psr2_su_area);
height = drm_rect_height(&crtc_state->psr2_su_area);
intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(pipe),
PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
@ -4764,7 +4774,11 @@ static bool
intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
const struct drm_dp_vsc_sdp *b)
{
return memcmp(a, b, sizeof(*a)) == 0;
return a->pixelformat == b->pixelformat &&
a->colorimetry == b->colorimetry &&
a->bpc == b->bpc &&
a->dynamic_range == b->dynamic_range &&
a->content_type == b->content_type;
}
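A minimal standalone sketch (hypothetical struct, not the real drm_dp_vsc_sdp layout) of why the field-wise compare replaces memcmp(): two SDPs that agree on every field the state checker cares about can still differ in header fields such as revision/length — e.g. 0x4/0xe for PSR2 versus 0x6/0x10 for panel replay, as set up in intel_dp_compute_psr_vsc_sdp():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct vsc_sdp {	/* hypothetical stand-in for struct drm_dp_vsc_sdp */
	unsigned char revision, length;
	unsigned char pixelformat, colorimetry, bpc, dynamic_range, content_type;
};

static bool vsc_equal(const struct vsc_sdp *a, const struct vsc_sdp *b)
{
	return a->pixelformat == b->pixelformat &&
	       a->colorimetry == b->colorimetry &&
	       a->bpc == b->bpc &&
	       a->dynamic_range == b->dynamic_range &&
	       a->content_type == b->content_type;
}

int main(void)
{
	struct vsc_sdp psr2 = { .revision = 0x4, .length = 0xe,  .bpc = 8 };
	struct vsc_sdp pr   = { .revision = 0x6, .length = 0x10, .bpc = 8 };

	printf("memcmp equal: %d\n", memcmp(&psr2, &pr, sizeof(psr2)) == 0); /* 0 */
	printf("field equal:  %d\n", vsc_equal(&psr2, &pr));                 /* 1 */
	return 0;
}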
static bool
@ -5045,8 +5059,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} while (0)
#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
if (!current_config->has_psr && !pipe_config->has_psr && \
!intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
if (!intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
&current_config->infoframes.name, \
@ -5199,13 +5212,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_CSC(csc);
PIPE_CONF_CHECK_CSC(output_csc);
if (current_config->active_planes) {
PIPE_CONF_CHECK_BOOL(has_psr);
PIPE_CONF_CHECK_BOOL(has_psr2);
PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
PIPE_CONF_CHECK_I(dc3co_exitline);
}
}
PIPE_CONF_CHECK_BOOL(double_wide);
@ -6307,6 +6313,9 @@ int intel_atomic_check(struct drm_device *dev,
int ret, i;
bool any_ms = false;
if (!intel_display_driver_check_access(dev_priv))
return -ENODEV;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
/*
@ -7068,6 +7077,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
drm_atomic_helper_wait_for_dependencies(&state->base);
drm_dp_mst_atomic_wait_for_dependencies(&state->base);
intel_atomic_global_state_wait_for_dependencies(state);
/*
* During full modesets we write a lot of registers, wait
@ -7244,6 +7254,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_pmdemand_post_plane_update(state);
drm_atomic_helper_commit_hw_done(&state->base);
intel_atomic_global_state_commit_done(state);
if (state->modeset) {
/* As one of the primary mmio accessors, KMS has a high
@ -7294,6 +7305,38 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state)
plane->frontbuffer_bit);
}
static int intel_atomic_setup_commit(struct intel_atomic_state *state, bool nonblock)
{
int ret;
ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
if (ret)
return ret;
ret = intel_atomic_global_state_setup_commit(state);
if (ret)
return ret;
return 0;
}
static int intel_atomic_swap_state(struct intel_atomic_state *state)
{
int ret;
ret = drm_atomic_helper_swap_state(&state->base, true);
if (ret)
return ret;
intel_atomic_swap_global_state(state);
intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
return 0;
}
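/*
 * These two helpers are pure factoring: the commit path below calls
 * them so the global-state setup/swap always stays paired with the
 * corresponding drm_atomic_helper_*() step.
 */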
int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
bool nonblock)
{
@ -7339,11 +7382,9 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
return ret;
}
ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
ret = intel_atomic_setup_commit(state, nonblock);
if (!ret)
ret = drm_atomic_helper_swap_state(&state->base, true);
if (!ret)
intel_atomic_swap_global_state(state);
ret = intel_atomic_swap_state(state);
if (ret) {
struct intel_crtc_state *new_crtc_state;
@ -7357,8 +7398,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
drm_atomic_state_get(&state->base);
INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
@ -7811,6 +7850,7 @@ static const struct intel_display_funcs skl_display_funcs = {
.crtc_disable = hsw_crtc_disable,
.commit_modeset_enables = skl_commit_modeset_enables,
.get_initial_plane_config = skl_get_initial_plane_config,
.fixup_initial_plane_config = skl_fixup_initial_plane_config,
};
static const struct intel_display_funcs ddi_display_funcs = {
@ -7819,6 +7859,7 @@ static const struct intel_display_funcs ddi_display_funcs = {
.crtc_disable = hsw_crtc_disable,
.commit_modeset_enables = intel_commit_modeset_enables,
.get_initial_plane_config = i9xx_get_initial_plane_config,
.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};
static const struct intel_display_funcs pch_split_display_funcs = {
@ -7827,6 +7868,7 @@ static const struct intel_display_funcs pch_split_display_funcs = {
.crtc_disable = ilk_crtc_disable,
.commit_modeset_enables = intel_commit_modeset_enables,
.get_initial_plane_config = i9xx_get_initial_plane_config,
.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};
static const struct intel_display_funcs vlv_display_funcs = {
@ -7835,6 +7877,7 @@ static const struct intel_display_funcs vlv_display_funcs = {
.crtc_disable = i9xx_crtc_disable,
.commit_modeset_enables = intel_commit_modeset_enables,
.get_initial_plane_config = i9xx_get_initial_plane_config,
.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};
static const struct intel_display_funcs i9xx_display_funcs = {
@ -7843,6 +7886,7 @@ static const struct intel_display_funcs i9xx_display_funcs = {
.crtc_disable = i9xx_crtc_disable,
.commit_modeset_enables = intel_commit_modeset_enables,
.get_initial_plane_config = i9xx_get_initial_plane_config,
.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};
/**

View file

@ -28,6 +28,8 @@
#include "intel_opregion.h"
#include "intel_wm_types.h"
struct task_struct;
struct drm_i915_private;
struct drm_property;
struct drm_property_blob;
@ -47,6 +49,7 @@ struct intel_fbdev;
struct intel_fdi_funcs;
struct intel_hotplug_funcs;
struct intel_initial_plane_config;
struct intel_opregion;
struct intel_overlay;
/* Amount of SAGV/QGV points, BSpec precisely defines this */
@ -64,6 +67,8 @@ struct intel_display_funcs {
struct intel_crtc_state *);
void (*get_initial_plane_config)(struct intel_crtc *,
struct intel_initial_plane_config *);
bool (*fixup_initial_plane_config)(struct intel_crtc *crtc,
const struct intel_initial_plane_config *plane_config);
void (*crtc_enable)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void (*crtc_disable)(struct intel_atomic_state *state,
@ -172,6 +177,12 @@ struct intel_hotplug {
struct work_struct poll_init_work;
bool poll_enabled;
/*
* Queuing of hotplug_work, reenable_work and poll_init_work is
* enabled. Protected by drm_i915_private::irq_lock.
*/
bool detection_work_enabled;
unsigned int hpd_storm_threshold;
/* Whether or not to count short HPD IRQs in HPD storms */
u8 hpd_short_storm_enabled;
@ -298,6 +309,11 @@ struct intel_display {
const struct intel_audio_funcs *audio;
} funcs;
struct {
bool any_task_allowed;
struct task_struct *allowed_task;
} access;
struct {
/* backlight registers and fields in struct intel_panel */
struct mutex lock;
@ -513,7 +529,7 @@ struct intel_display {
struct intel_fbc *fbc[I915_MAX_FBCS];
struct intel_frontbuffer_tracking fb_tracking;
struct intel_hotplug hotplug;
struct intel_opregion opregion;
struct intel_opregion *opregion;
struct intel_overlay *overlay;
struct intel_display_params params;
struct intel_vbt_data vbt;

View file

@ -86,28 +86,6 @@ static int i915_sr_status(struct seq_file *m, void *unused)
return 0;
}
static int i915_opregion(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
struct intel_opregion *opregion = &i915->display.opregion;
if (opregion->header)
seq_write(m, opregion->header, OPREGION_SIZE);
return 0;
}
static int i915_vbt(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
struct intel_opregion *opregion = &i915->display.opregion;
if (opregion->vbt)
seq_write(m, opregion->vbt, opregion->vbt_size);
return 0;
}
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@ -1066,8 +1044,6 @@ static const struct file_operations i915_fifo_underrun_reset_ops = {
static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
{"i915_sr_status", i915_sr_status, 0},
{"i915_opregion", i915_opregion, 0},
{"i915_vbt", i915_vbt, 0},
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
@ -1105,10 +1081,12 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
ARRAY_SIZE(intel_display_debugfs_list),
minor->debugfs_root, minor);
intel_bios_debugfs_register(i915);
intel_cdclk_debugfs_register(i915);
intel_dmc_debugfs_register(i915);
intel_fbc_debugfs_register(i915);
intel_hpd_debugfs_register(i915);
intel_opregion_debugfs_register(i915);
intel_psr_debugfs_register(i915);
intel_wm_debugfs_register(i915);
intel_display_debugfs_params(i915);

View file

@ -3,6 +3,7 @@
* Copyright © 2023 Intel Corporation
*/
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <drm/drm_drv.h>

View file

@ -1012,7 +1012,7 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
goto display_fused_off;
}
if (IS_GRAPHICS_VER(i915, 7, 8) && HAS_PCH_SPLIT(i915)) {
if (IS_DISPLAY_VER(i915, 7, 8) && HAS_PCH_SPLIT(i915)) {
u32 fuse_strap = intel_de_read(i915, FUSE_STRAP);
u32 sfuse_strap = intel_de_read(i915, SFUSE_STRAP);

View file

@ -45,6 +45,7 @@
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hti.h"
#include "intel_modeset_lock.h"
#include "intel_modeset_setup.h"
#include "intel_opregion.h"
#include "intel_overlay.h"
@ -276,12 +277,144 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
return ret;
}
static void set_display_access(struct drm_i915_private *i915,
bool any_task_allowed,
struct task_struct *allowed_task)
{
struct drm_modeset_acquire_ctx ctx;
int err;
intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) {
err = drm_modeset_lock_all_ctx(&i915->drm, &ctx);
if (err)
continue;
i915->display.access.any_task_allowed = any_task_allowed;
i915->display.access.allowed_task = allowed_task;
}
drm_WARN_ON(&i915->drm, err);
}
/**
* intel_display_driver_enable_user_access - Enable display HW access for all threads
* @i915: i915 device instance
*
* Enable the display HW access for all threads. Examples for such accesses
* are modeset commits and connector probing.
*
* This function should be called during driver loading and system resume once
* all the HW initialization steps are done.
*/
void intel_display_driver_enable_user_access(struct drm_i915_private *i915)
{
set_display_access(i915, true, NULL);
intel_hpd_enable_detection_work(i915);
}
/**
* intel_display_driver_disable_user_access - Disable display HW access for user threads
* @i915: i915 device instance
*
* Disable the display HW access for user threads. Examples for such accesses
* are modeset commits and connector probing. For the current thread
* access remains enabled; that thread should only perform HW init/deinit
* programming (as the initial modeset during driver loading or the disabling
* modeset during driver unloading and system suspend/shutdown). This function
* should be followed by calling either intel_display_driver_enable_user_access()
* after completing the HW init programming or
* intel_display_driver_suspend_access() after completing the HW deinit
* programming.
*
* This function should be called during driver loading/unloading and system
* suspend/shutdown before starting the HW init/deinit programming.
*/
void intel_display_driver_disable_user_access(struct drm_i915_private *i915)
{
intel_hpd_disable_detection_work(i915);
set_display_access(i915, false, current);
}
/**
* intel_display_driver_suspend_access - Suspend display HW access for all threads
* @i915: i915 device instance
*
* Disable the display HW access for all threads. Examples for such accesses
* are modeset commits and connector probing. This call should either be
* followed by calling intel_display_driver_resume_access(), or the driver
* should be unloaded/shut down.
*
* This function should be called during driver unloading and system
* suspend/shutdown after completing the HW deinit programming.
*/
void intel_display_driver_suspend_access(struct drm_i915_private *i915)
{
set_display_access(i915, false, NULL);
}
/**
* intel_display_driver_resume_access - Resume display HW access for the resume thread
* @i915: i915 device instance
*
* Enable the display HW access for the current resume thread, keeping the
* access disabled for all other (user) threads. Examples for such accesses
* are modeset commits and connector probing. The resume thread should only
* perform HW init programming (as the restoring modeset). This function
* should be followed by calling intel_display_driver_enable_user_access(),
* after completing the HW init programming steps.
*
* This function should be called during system resume before starting the HW
* init steps.
*/
void intel_display_driver_resume_access(struct drm_i915_private *i915)
{
set_display_access(i915, false, current);
}
/**
* intel_display_driver_check_access - Check if the current thread has display HW access
* @i915: i915 device instance
*
* Check whether the current thread has display HW access, print a debug
* message if it doesn't. Such accesses are modeset commits and connector
* probing. If the function returns %false any HW access should be prevented.
*
* Returns %true if the current thread has display HW access, %false
* otherwise.
*/
bool intel_display_driver_check_access(struct drm_i915_private *i915)
{
char comm[TASK_COMM_LEN];
char current_task[TASK_COMM_LEN + 16];
char allowed_task[TASK_COMM_LEN + 16] = "none";
if (i915->display.access.any_task_allowed ||
i915->display.access.allowed_task == current)
return true;
snprintf(current_task, sizeof(current_task), "%s[%d]",
get_task_comm(comm, current),
task_pid_vnr(current));
if (i915->display.access.allowed_task)
snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
get_task_comm(comm, i915->display.access.allowed_task),
task_pid_vnr(i915->display.access.allowed_task));
drm_dbg_kms(&i915->drm,
"Reject display access from task %s (allowed to %s)\n",
current_task, allowed_task);
return false;
}
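A minimal ordering sketch, assembled from the kdoc above, of how these entry points are meant to pair up; the call sequences are illustrative placeholders, not the driver's actual probe/suspend/resume code:

/* driver load: HW init owned by the loading thread */
intel_display_driver_disable_user_access(i915);
/* ... initial modeset, HW init programming ... */
intel_display_driver_enable_user_access(i915);

/* system suspend: block user threads, then drop access entirely */
intel_display_driver_disable_user_access(i915);
/* ... disabling modeset, HW deinit programming ... */
intel_display_driver_suspend_access(i915);

/* system resume: only the resume thread may program the HW */
intel_display_driver_resume_access(i915);
/* ... restoring modeset, HW init programming ... */
intel_display_driver_enable_user_access(i915);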
/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
{
struct drm_device *dev = &i915->drm;
enum pipe pipe;
struct intel_crtc *crtc;
int ret;
if (!HAS_DISPLAY(i915))
@ -315,8 +448,6 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
intel_display_driver_init_hw(i915);
intel_dpll_update_ref_clks(i915);
intel_hdcp_component_init(i915);
if (i915->display.cdclk.max_cdclk_freq == 0)
intel_update_max_cdclk(i915);
@ -326,16 +457,14 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
intel_vga_disable(i915);
intel_setup_outputs(i915);
intel_display_driver_disable_user_access(i915);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
intel_acpi_assign_connector_fwnodes(i915);
drm_modeset_unlock_all(dev);
for_each_intel_crtc(dev, crtc) {
if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
continue;
intel_crtc_initial_plane_config(crtc);
}
intel_initial_plane_config(i915);
/*
* Make sure hardware watermarks really match the state we read out.
@ -356,6 +485,13 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return 0;
/*
* This will bind stuff into ggtt, so it needs to be done after
* the BIOS fb takeover and whatever other magic ggtt reservations
* happen during gem/ggtt init.
*/
intel_hdcp_component_init(i915);
/*
* Force all active planes to recompute their states. So that on
* mode_setcrtc after probe, all the intel_plane_state variables
@ -374,7 +510,6 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(i915);
intel_hpd_poll_disable(i915);
skl_watermark_ipc_init(i915);
@ -394,6 +529,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)
intel_audio_init(i915);
intel_display_driver_enable_user_access(i915);
intel_display_debugfs_register(i915);
/*
@ -412,6 +549,7 @@ void intel_display_driver_register(struct drm_i915_private *i915)
* fbdev->async_cookie.
*/
drm_kms_helper_poll_init(&i915->drm);
intel_hpd_poll_disable(i915);
intel_display_device_info_print(DISPLAY_INFO(i915),
DISPLAY_RUNTIME_INFO(i915), &p);
@ -440,6 +578,8 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
intel_display_driver_suspend_access(i915);
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
@ -486,14 +626,17 @@ void intel_display_driver_unregister(struct drm_i915_private *i915)
return;
intel_fbdev_unregister(i915);
intel_audio_deinit(i915);
/*
* After flushing the fbdev (incl. a late async config which
* will have delayed queuing of a hotplug event), flush
* the hotplug events.
*/
drm_kms_helper_poll_fini(&i915->drm);
intel_display_driver_disable_user_access(i915);
intel_audio_deinit(i915);
drm_atomic_helper_shutdown(&i915->drm);
acpi_video_unregister();

View file

@ -32,5 +32,11 @@ int __intel_display_driver_resume(struct drm_i915_private *i915,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx);
void intel_display_driver_enable_user_access(struct drm_i915_private *i915);
void intel_display_driver_disable_user_access(struct drm_i915_private *i915);
void intel_display_driver_suspend_access(struct drm_i915_private *i915);
void intel_display_driver_resume_access(struct drm_i915_private *i915);
bool intel_display_driver_check_access(struct drm_i915_private *i915);
#endif /* __INTEL_DISPLAY_DRIVER_H__ */

View file

@ -266,12 +266,12 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
intel_uncore_posting_read(&dev_priv->uncore, reg);
}
static bool i915_has_asle(struct drm_i915_private *dev_priv)
static bool i915_has_asle(struct drm_i915_private *i915)
{
if (!dev_priv->display.opregion.asle)
if (!IS_PINEVIEW(i915) && !IS_MOBILE(i915))
return false;
return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
return intel_opregion_asle_present(i915);
}
/**
@ -986,7 +986,7 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
* their flags both in the PICA and SDE IIR.
*/
if (*pch_iir & SDE_PICAINTERRUPT) {
drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTP);
drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);
pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0);
*pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR);
@ -1587,7 +1587,7 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915)
struct intel_uncore *uncore = &i915->uncore;
u32 display_mask, extra_mask;
if (GRAPHICS_VER(i915) >= 7) {
if (DISPLAY_VER(i915) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |

View file

@ -780,6 +780,8 @@ struct intel_plane_state {
struct intel_initial_plane_config {
struct intel_framebuffer *fb;
struct intel_memory_region *mem;
resource_size_t phys_base;
struct i915_vma *vma;
unsigned int tiling;
int size;
@ -1213,12 +1215,12 @@ struct intel_crtc_state {
bool has_psr;
bool has_psr2;
bool enable_psr2_sel_fetch;
bool enable_psr2_su_region_et;
bool req_psr2_sdp_prior_scanline;
bool has_panel_replay;
bool wm_level_disabled;
u32 dc3co_exitline;
u16 su_y_granularity;
struct drm_dp_vsc_sdp psr_vsc;
/*
* Frequency the dpll for the port should run at. Differs from the
@ -1402,6 +1404,8 @@ struct intel_crtc_state {
u32 psr2_man_track_ctl;
struct drm_rect psr2_su_area;
/* Variable Refresh Rate state */
struct {
bool enable, in_range;
@ -1682,13 +1686,14 @@ struct intel_psr {
/* Mutex for PSR state of the transcoder */
struct mutex lock;
#define I915_PSR_DEBUG_MODE_MASK 0x0f
#define I915_PSR_DEBUG_DEFAULT 0x00
#define I915_PSR_DEBUG_DISABLE 0x01
#define I915_PSR_DEBUG_ENABLE 0x02
#define I915_PSR_DEBUG_FORCE_PSR1 0x03
#define I915_PSR_DEBUG_ENABLE_SEL_FETCH 0x4
#define I915_PSR_DEBUG_IRQ 0x10
#define I915_PSR_DEBUG_MODE_MASK 0x0f
#define I915_PSR_DEBUG_DEFAULT 0x00
#define I915_PSR_DEBUG_DISABLE 0x01
#define I915_PSR_DEBUG_ENABLE 0x02
#define I915_PSR_DEBUG_FORCE_PSR1 0x03
#define I915_PSR_DEBUG_ENABLE_SEL_FETCH 0x4
#define I915_PSR_DEBUG_IRQ 0x10
#define I915_PSR_DEBUG_SU_REGION_ET_DISABLE 0x20
u32 debug;
bool sink_support;
@ -1702,14 +1707,20 @@ struct intel_psr {
unsigned int busy_frontbuffer_bits;
bool sink_psr2_support;
bool link_standby;
bool colorimetry_support;
bool psr2_enabled;
bool psr2_sel_fetch_enabled;
bool psr2_sel_fetch_cff_enabled;
bool req_psr2_sdp_prior_scanline;
u8 sink_sync_latency;
u8 io_wake_lines;
u8 fast_wake_lines;
struct {
u8 io_wake_lines;
u8 fast_wake_lines;
/* LNL and beyond */
u8 check_entry_lines;
} alpm_parameters;
ktime_t last_entry_attempt;
ktime_t last_exit;
bool sink_not_reliable;
@ -1833,6 +1844,8 @@ struct intel_dp {
/* When we last wrote the OUI for eDP */
unsigned long last_oui_write;
bool colorimetry_support;
};
enum lspcon_vendor {
@ -1890,6 +1903,9 @@ struct intel_digital_port {
u32 (*infoframes_enabled)(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
bool (*connected)(struct intel_encoder *encoder);
void (*lock)(struct intel_digital_port *dig_port);
void (*unlock)(struct intel_digital_port *dig_port);
};
struct intel_dp_mst_encoder {

View file

@ -1158,7 +1158,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
str_yes_no(intel_dmc_has_payload(i915)));
seq_printf(m, "path: %s\n", dmc ? dmc->fw_path : "N/A");
seq_printf(m, "Pipe A fw needed: %s\n",
str_yes_no(GRAPHICS_VER(i915) >= 12));
str_yes_no(DISPLAY_VER(i915) >= 12));
seq_printf(m, "Pipe A fw loaded: %s\n",
str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA)));
seq_printf(m, "Pipe B fw needed: %s\n",

View file

@ -56,6 +56,7 @@
#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
@ -2616,58 +2617,38 @@ static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
struct drm_dp_vsc_sdp *vsc;
/* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
if (crtc_state->has_psr)
if ((!intel_dp->colorimetry_support ||
!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) &&
!crtc_state->has_psr)
return;
if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
return;
vsc = &crtc_state->infoframes.vsc;
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
vsc->sdp_type = DP_SDP_VSC;
intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
&crtc_state->infoframes.vsc);
}
void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state,
struct drm_dp_vsc_sdp *vsc)
{
vsc->sdp_type = DP_SDP_VSC;
if (crtc_state->has_psr2) {
if (intel_dp->psr.colorimetry_support &&
intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
/* [PSR2, +Colorimetry] */
intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
vsc);
} else {
/*
* [PSR2, -Colorimetry]
* Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
* 3D stereo + PSR/PSR2 + Y-coordinate.
*/
vsc->revision = 0x4;
vsc->length = 0xe;
}
/* Needs colorimetry */
if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
vsc);
} else if (crtc_state->has_psr2) {
/*
* [PSR2 without colorimetry]
* Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
* 3D stereo + PSR/PSR2 + Y-coordinate.
*/
vsc->revision = 0x4;
vsc->length = 0xe;
} else if (crtc_state->has_panel_replay) {
if (intel_dp->psr.colorimetry_support &&
intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
/* [Panel Replay with colorimetry info] */
intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
vsc);
} else {
/*
* [Panel Replay without colorimetry info]
* Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
* VSC SDP supporting 3D stereo + Panel Replay.
*/
vsc->revision = 0x6;
vsc->length = 0x10;
}
/*
* [Panel Replay without colorimetry info]
* Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
* VSC SDP supporting 3D stereo + Panel Replay.
*/
vsc->revision = 0x6;
vsc->length = 0x10;
} else {
/*
* [PSR1]
@ -3345,13 +3326,6 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
fastset = false;
}
if (CAN_PSR(intel_dp)) {
drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute PSR state\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
fastset = false;
}
return fastset;
}
@ -4288,24 +4262,6 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}
void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_dp_vsc_sdp *vsc)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct dp_sdp sdp = {};
ssize_t len;
len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
if (drm_WARN_ON(&dev_priv->drm, len < 0))
return;
dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
&sdp, len);
}
void intel_dp_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
@ -4332,9 +4288,7 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
if (!enable)
return;
/* When PSR is enabled, VSC SDP is handled by PSR routine */
if (!crtc_state->has_psr)
intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}
@ -4465,10 +4419,6 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
struct dp_sdp sdp = {};
int ret;
/* When PSR is enabled, VSC SDP is handled by PSR routine */
if (crtc_state->has_psr)
return;
if ((crtc_state->infoframes.enable &
intel_hdmi_infoframe_enable(type)) == 0)
return;
@ -4679,31 +4629,36 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
struct drm_dp_phy_test_params *data =
&intel_dp->compliance.test_data.phytest;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
enum pipe pipe = crtc->pipe;
u32 pattern_val;
switch (data->phy_pattern) {
case DP_PHY_TEST_PATTERN_NONE:
case DP_LINK_QUAL_PATTERN_DISABLE:
drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n");
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
if (DISPLAY_VER(dev_priv) >= 10)
intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
DP_TP_CTL_LINK_TRAIN_NORMAL);
break;
case DP_PHY_TEST_PATTERN_D10_2:
case DP_LINK_QUAL_PATTERN_D10_2:
drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n");
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
break;
case DP_PHY_TEST_PATTERN_ERROR_COUNT:
case DP_LINK_QUAL_PATTERN_ERROR_RATE:
drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n");
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
DDI_DP_COMP_CTL_ENABLE |
DDI_DP_COMP_CTL_SCRAMBLED_0);
break;
case DP_PHY_TEST_PATTERN_PRBS7:
case DP_LINK_QUAL_PATTERN_PRBS7:
drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n");
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
break;
case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
case DP_LINK_QUAL_PATTERN_80BIT_CUSTOM:
/*
* FIXME: Ideally pattern should come from DPCD 0x250. As
* current firmware of DPR-100 could not set it, so hardcoding
@ -4721,7 +4676,7 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
DDI_DP_COMP_CTL_ENABLE |
DDI_DP_COMP_CTL_CUSTOM80);
break;
case DP_PHY_TEST_PATTERN_CP2520:
case DP_LINK_QUAL_PATTERN_CP2520_PAT_1:
/*
* FIXME: Ideally pattern should come from DPCD 0x24A. As
* current firmware of DPR-100 could not set it, so hardcoding
@ -4733,8 +4688,19 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
pattern_val);
break;
case DP_LINK_QUAL_PATTERN_CP2520_PAT_3:
if (DISPLAY_VER(dev_priv) < 10) {
drm_warn(&dev_priv->drm, "Platform does not support TPS4\n");
break;
}
drm_dbg_kms(&dev_priv->drm, "Set TPS4 compliance Phy Test Pattern\n");
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
DP_TP_CTL_TRAIN_PAT4_SEL_TP4A | DP_TP_CTL_LINK_TRAIN_PAT4);
break;
default:
WARN(1, "Invalid Phy Test Pattern\n");
drm_warn(&dev_priv->drm, "Invalid Phy Test Pattern\n");
}
}
@ -5453,8 +5419,24 @@ edp_detect(struct intel_dp *intel_dp)
return connector_status_connected;
}
void intel_digital_port_lock(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
if (dig_port->lock)
dig_port->lock(dig_port);
}
void intel_digital_port_unlock(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
if (dig_port->unlock)
dig_port->unlock(dig_port);
}
/*
* intel_digital_port_connected - is the specified port connected?
* intel_digital_port_connected_locked - is the specified port connected?
* @encoder: intel_encoder
*
* In cases where there's a connector physically connected but it can't be used
@ -5462,21 +5444,44 @@ edp_detect(struct intel_dp *intel_dp)
* pretty much treat the port as disconnected. This is relevant for type-C
* (starting on ICL) where there's ownership involved.
*
* The caller must hold the lock acquired by calling intel_digital_port_lock()
* when calling this function.
*
* Return %true if port is connected, %false otherwise.
*/
bool intel_digital_port_connected(struct intel_encoder *encoder)
bool intel_digital_port_connected_locked(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_glitch_free = intel_tc_port_handles_hpd_glitches(dig_port);
bool is_connected = false;
intel_wakeref_t wakeref;
with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
is_connected = dig_port->connected(encoder);
with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
unsigned long wait_expires = jiffies + msecs_to_jiffies_timeout(4);
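/* Poll briefly so a momentary low on the live status doesn't read as
 * disconnected on ports whose TC PHY doesn't filter such glitches
 * itself (see intel_tc_port_handles_hpd_glitches()). */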
do {
is_connected = dig_port->connected(encoder);
if (is_connected || is_glitch_free)
break;
usleep_range(10, 30);
} while (time_before(jiffies, wait_expires));
}
return is_connected;
}
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
bool ret;
intel_digital_port_lock(encoder);
ret = intel_digital_port_connected_locked(encoder);
intel_digital_port_unlock(encoder);
return ret;
}
static const struct drm_edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
@ -5670,6 +5675,9 @@ intel_dp_detect(struct drm_connector *connector,
if (!intel_display_device_enabled(dev_priv))
return connector_status_disconnected;
if (!intel_display_driver_check_access(dev_priv))
return connector->status;
/* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
@ -5770,6 +5778,10 @@ intel_dp_force(struct drm_connector *connector)
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
if (!intel_display_driver_check_access(dev_priv))
return;
intel_dp_unset_edid(intel_dp);
if (connector->status != connector_status_connected)
@ -6054,7 +6066,7 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
spin_unlock_irq(&i915->irq_lock);
if (need_work)
queue_delayed_work(i915->unordered_wq, &i915->display.hotplug.hotplug_work, 0);
intel_hpd_schedule_detection(i915);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@ -6497,6 +6509,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
connector->interlace_allowed = true;
intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_connector->base.polled = intel_connector->polled;
intel_connector_attach_encoder(intel_connector, intel_encoder);
@ -6527,6 +6540,9 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
"HDCP init failed, skipping.\n");
}
intel_dp->colorimetry_support =
intel_dp_get_colorimetry_status(intel_dp);
intel_dp->frl.is_trained = false;
intel_dp->frl.trained_rate_gbps = 0;

View file

@ -109,20 +109,16 @@ int intel_dp_max_data_rate(int max_link_rate, int max_lanes);
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state,
struct drm_dp_vsc_sdp *vsc);
void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_dp_vsc_sdp *vsc);
void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_read_dp_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
unsigned int type);
void intel_digital_port_lock(struct intel_encoder *encoder);
void intel_digital_port_unlock(struct intel_encoder *encoder);
bool intel_digital_port_connected(struct intel_encoder *encoder);
bool intel_digital_port_connected_locked(struct intel_encoder *encoder);
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
u8 dsc_max_bpc);
u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,

View file

@ -9,6 +9,7 @@
#include "intel_bios.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_aux_regs.h"
#include "intel_pps.h"
@ -228,9 +229,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u32 aux_send_ctl_flags)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
bool is_tc_port = intel_phy_is_tc(i915, phy);
i915_reg_t ch_ctl, ch_data[5];
u32 aux_clock_divider;
enum intel_display_power_domain aux_domain;
@ -245,18 +245,16 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (i = 0; i < ARRAY_SIZE(ch_data); i++)
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
if (is_tc_port) {
intel_tc_port_lock(dig_port);
/*
* Abort transfers on a disconnected port as required by
* DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
* timeouts that would otherwise happen.
* TODO: abort the transfer on non-TC ports as well.
*/
if (!intel_tc_port_connected_locked(&dig_port->base)) {
ret = -ENXIO;
goto out_unlock;
}
intel_digital_port_lock(encoder);
/*
* Abort transfers on a disconnected port as required by
* DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
* timeouts that would otherwise happen.
*/
if (!intel_dp_is_edp(intel_dp) &&
!intel_digital_port_connected_locked(&dig_port->base)) {
ret = -ENXIO;
goto out_unlock;
}
aux_domain = intel_aux_power_domain(dig_port);
@ -423,8 +421,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
intel_pps_unlock(intel_dp, pps_wakeref);
intel_display_power_put_async(i915, aux_domain, aux_wakeref);
out_unlock:
if (is_tc_port)
intel_tc_port_unlock(dig_port);
intel_digital_port_unlock(encoder);
return ret;
}

View file

@ -37,6 +37,7 @@
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
@ -1410,6 +1411,9 @@ intel_dp_mst_detect(struct drm_connector *connector,
if (drm_connector_is_unregistered(connector))
return connector_status_disconnected;
if (!intel_display_driver_check_access(i915))
return connector->status;
return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr,
intel_connector->port);
}

View file

@ -1263,11 +1263,11 @@ static const struct dpll_info hsw_plls[] = {
{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
.flags = INTEL_DPLL_ALWAYS_ON, },
.always_on = true, },
{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
.flags = INTEL_DPLL_ALWAYS_ON, },
.always_on = true, },
{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
.flags = INTEL_DPLL_ALWAYS_ON, },
.always_on = true, },
{}
};
@ -1945,7 +1945,7 @@ static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
static const struct dpll_info skl_plls[] = {
{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
.flags = INTEL_DPLL_ALWAYS_ON, },
.always_on = true, },
{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
@ -3308,6 +3308,8 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct icl_port_dpll *port_dpll =
&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
struct skl_wrpll_params pll_params = {};
@ -3326,7 +3328,11 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
return ret;
/* this is mainly for the fastset check */
icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
if (old_crtc_state->shared_dpll &&
old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
else
icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
&port_dpll->hw_state);
@ -4023,7 +4029,8 @@ static const struct intel_shared_dpll_funcs mg_pll_funcs = {
static const struct dpll_info icl_plls[] = {
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
.is_alt_port_dpll = true, },
{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
@ -4068,7 +4075,8 @@ static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
static const struct dpll_info tgl_plls[] = {
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
.is_alt_port_dpll = true, },
{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
@ -4141,7 +4149,8 @@ static const struct intel_dpll_mgr adls_pll_mgr = {
static const struct dpll_info adlp_plls[] = {
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
.is_alt_port_dpll = true, },
{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
@ -4465,31 +4474,29 @@ verify_single_dpll_state(struct drm_i915_private *i915,
struct intel_crtc *crtc,
const struct intel_crtc_state *new_crtc_state)
{
struct intel_dpll_hw_state dpll_hw_state;
struct intel_dpll_hw_state dpll_hw_state = {};
u8 pipe_mask;
bool active;
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
drm_dbg_kms(&i915->drm, "%s\n", pll->info->name);
active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
if (!pll->info->always_on) {
I915_STATE_WARN(i915, !pll->on && pll->active_mask,
"pll in active use but not on in sw tracking\n");
"%s: pll in active use but not on in sw tracking\n",
pll->info->name);
I915_STATE_WARN(i915, pll->on && !pll->active_mask,
"pll is on but not used by any active pipe\n");
"%s: pll is on but not used by any active pipe\n",
pll->info->name);
I915_STATE_WARN(i915, pll->on != active,
"pll on state mismatch (expected %i, found %i)\n",
pll->on, active);
"%s: pll on state mismatch (expected %i, found %i)\n",
pll->info->name, pll->on, active);
}
if (!crtc) {
I915_STATE_WARN(i915,
pll->active_mask & ~pll->state.pipe_mask,
"more active pll users than references: 0x%x vs 0x%x\n",
pll->active_mask, pll->state.pipe_mask);
"%s: more active pll users than references: 0x%x vs 0x%x\n",
pll->info->name, pll->active_mask, pll->state.pipe_mask);
return;
}
@ -4498,21 +4505,29 @@ verify_single_dpll_state(struct drm_i915_private *i915,
if (new_crtc_state->hw.active)
I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask),
"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
pipe_name(crtc->pipe), pll->active_mask);
"%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
else
I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
pipe_name(crtc->pipe), pll->active_mask);
"%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask),
"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
pipe_mask, pll->state.pipe_mask);
"%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
pll->info->name, pipe_mask, pll->state.pipe_mask);
I915_STATE_WARN(i915,
pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
sizeof(dpll_hw_state)),
"pll hw state mismatch\n");
"%s: pll hw state mismatch\n",
pll->info->name);
}
static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
const struct intel_shared_dpll *new_pll)
{
return old_pll && new_pll && old_pll != new_pll &&
(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
}
void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
@ -4534,11 +4549,15 @@ void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
pipe_name(crtc->pipe), pll->active_mask);
I915_STATE_WARN(i915, pll->state.pipe_mask & pipe_mask,
"pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
pipe_name(crtc->pipe), pll->state.pipe_mask);
"%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
I915_STATE_WARN(i915, !has_alt_port_dpll(old_crtc_state->shared_dpll,
new_crtc_state->shared_dpll) &&
pll->state.pipe_mask & pipe_mask,
"%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
}
}

View file

@ -276,15 +276,21 @@ struct dpll_info {
*/
enum intel_display_power_domain power_domain;
#define INTEL_DPLL_ALWAYS_ON (1 << 0)
/**
* @flags:
* @always_on:
*
* INTEL_DPLL_ALWAYS_ON
* Inform the state checker that the DPLL is kept enabled even if
* not in use by any CRTC.
* Inform the state checker that the DPLL is kept enabled even if
* not in use by any CRTC.
*/
u32 flags;
bool always_on;
/**
* @is_alt_port_dpll:
*
* Inform the state checker that the DPLL can be used as a fallback
* (for TC->TBT fallback).
*/
bool is_alt_port_dpll;
};
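With the flags bitmask gone, table entries spell the two properties out as plain designated initializers. An illustrative (not real) table showing both fields, borrowing names that appear elsewhere in this diff:

/* Sketch, not part of the commit: filling in the reworked fields. */
static const struct dpll_info example_plls[] = {
	/* Hardware keeps this PLL running even with no CRTC using it: */
	{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs,
	  .id = DPLL_ID_LCPLL_810, .always_on = true, },
	/* Can serve as the TC->TBT fallback PLL: */
	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs,
	  .id = DPLL_ID_ICL_TBTPLL, .is_alt_port_dpll = true, },
	{}
};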
/**

View file

@ -453,6 +453,10 @@ struct intel_dsb *intel_dsb_prepare(const struct intel_crtc_state *crtc_state,
if (!HAS_DSB(i915))
return NULL;
/* TODO: DSB is broken in Xe KMD, so disabling it until fixed */
if (!IS_ENABLED(I915))
return NULL;
dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
if (!dsb)
goto out;

View file

@ -35,6 +35,7 @@
#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_dvo.h"
#include "intel_dvo_dev.h"
@ -328,6 +329,9 @@ intel_dvo_detect(struct drm_connector *_connector, bool force)
if (!intel_display_device_enabled(i915))
return connector_status_disconnected;
if (!intel_display_driver_check_access(i915))
return connector->base.status;
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
@ -536,6 +540,7 @@ void intel_dvo_init(struct drm_i915_private *i915)
if (intel_dvo->dev.type == INTEL_DVO_CHIP_TMDS)
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
connector->base.polled = connector->polled;
drm_connector_init_with_ddc(&i915->drm, &connector->base,
&intel_dvo_connector_funcs,

View file

@ -1087,18 +1087,7 @@ static bool i8xx_fbc_tiling_valid(const struct intel_plane_state *plane_state)
static bool skl_fbc_tiling_valid(const struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
case I915_FORMAT_MOD_4_TILED:
case I915_FORMAT_MOD_X_TILED:
return true;
default:
return false;
}
return true;
}
static bool tiling_is_valid(const struct intel_plane_state *plane_state)

View file

@ -78,8 +78,9 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
/* Use fbdev's framebuffer from lmem for discrete */
info->fix.smem_start =
(unsigned long)(mem->io_start +
i915_gem_object_get_dma_address(obj, 0));
(unsigned long)(mem->io.start +
i915_gem_object_get_dma_address(obj, 0) -
mem->region.start);
info->fix.smem_len = obj->base.size;
} else {
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

View file

@ -10,12 +10,55 @@
#include "intel_display_types.h"
#include "intel_global_state.h"
struct intel_global_commit {
struct kref ref;
struct completion done;
};
static struct intel_global_commit *commit_new(void)
{
struct intel_global_commit *commit;
commit = kzalloc(sizeof(*commit), GFP_KERNEL);
if (!commit)
return NULL;
init_completion(&commit->done);
kref_init(&commit->ref);
return commit;
}
static void __commit_free(struct kref *kref)
{
struct intel_global_commit *commit =
container_of(kref, typeof(*commit), ref);
kfree(commit);
}
static struct intel_global_commit *commit_get(struct intel_global_commit *commit)
{
if (commit)
kref_get(&commit->ref);
return commit;
}
static void commit_put(struct intel_global_commit *commit)
{
if (commit)
kref_put(&commit->ref, __commit_free);
}
static void __intel_atomic_global_state_free(struct kref *kref)
{
struct intel_global_state *obj_state =
container_of(kref, struct intel_global_state, ref);
struct intel_global_obj *obj = obj_state->obj;
commit_put(obj_state->commit);
obj->funcs->atomic_destroy_state(obj, obj_state);
}
@ -127,6 +170,8 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
obj_state->obj = obj;
obj_state->changed = false;
obj_state->serialized = false;
obj_state->commit = NULL;
kref_init(&obj_state->ref);
@ -239,19 +284,13 @@ int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
{
struct intel_atomic_state *state = obj_state->state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
int ret;
for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state;
ret = intel_atomic_lock_global_state(obj_state);
if (ret)
return ret;
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
}
obj_state->changed = true;
obj_state->serialized = true;
return 0;
}
@ -267,3 +306,79 @@ intel_atomic_global_state_is_serialized(struct intel_atomic_state *state)
return false;
return true;
}
int
intel_atomic_global_state_setup_commit(struct intel_atomic_state *state)
{
const struct intel_global_state *old_obj_state;
struct intel_global_state *new_obj_state;
struct intel_global_obj *obj;
int i;
for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
new_obj_state, i) {
struct intel_global_commit *commit = NULL;
if (new_obj_state->serialized) {
/*
* New commit which is going to be completed
* after the hardware reprogramming is done.
*/
commit = commit_new();
if (!commit)
return -ENOMEM;
} else if (new_obj_state->changed) {
/*
* We're going to swap to this state, so carry the
* previous commit along, in case it's not yet done.
*/
commit = commit_get(old_obj_state->commit);
}
new_obj_state->commit = commit;
}
return 0;
}
int
intel_atomic_global_state_wait_for_dependencies(struct intel_atomic_state *state)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_global_state *old_obj_state;
struct intel_global_obj *obj;
int i;
for_each_old_global_obj_in_state(state, obj, old_obj_state, i) {
struct intel_global_commit *commit = old_obj_state->commit;
long ret;
if (!commit)
continue;
ret = wait_for_completion_timeout(&commit->done, 10 * HZ);
if (ret == 0) {
drm_err(&i915->drm, "global state timed out\n");
return -ETIMEDOUT;
}
}
return 0;
}
void
intel_atomic_global_state_commit_done(struct intel_atomic_state *state)
{
const struct intel_global_state *new_obj_state;
struct intel_global_obj *obj;
int i;
for_each_new_global_obj_in_state(state, obj, new_obj_state, i) {
struct intel_global_commit *commit = new_obj_state->commit;
if (!new_obj_state->serialized)
continue;
complete_all(&commit->done);
}
}
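Taken together, the three entry points above give global state the same kref-plus-completion commit tracking that per-CRTC state already has. A hypothetical sketch of how a commit is expected to thread through them (error handling abbreviated):

/* Sketch, not part of the commit: the three-phase commit flow. */
static int global_commit_flow_sketch(struct intel_atomic_state *state)
{
	int ret;

	/* 1. While building the commit: allocate a commit object for
	 *    serialized state, or carry the previous one along. */
	ret = intel_atomic_global_state_setup_commit(state);
	if (ret)
		return ret;

	/* 2. Before touching hardware: wait (up to 10s) for prior commits. */
	ret = intel_atomic_global_state_wait_for_dependencies(state);
	if (ret)
		return ret;

	/* ... hardware reprogramming happens here ... */

	/* 3. Afterwards: complete our commit so later commits can proceed. */
	intel_atomic_global_state_commit_done(state);

	return 0;
}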

View file

@ -54,11 +54,14 @@ struct intel_global_obj {
(__i)++) \
for_each_if(obj)
struct intel_global_commit;
struct intel_global_state {
struct intel_global_obj *obj;
struct intel_atomic_state *state;
struct intel_global_commit *commit;
struct kref ref;
bool changed;
bool changed, serialized;
};
struct __intel_global_objs_state {
@ -87,6 +90,10 @@ void intel_atomic_clear_global_state(struct intel_atomic_state *state);
int intel_atomic_lock_global_state(struct intel_global_state *obj_state);
int intel_atomic_serialize_global_state(struct intel_global_state *obj_state);
int intel_atomic_global_state_setup_commit(struct intel_atomic_state *state);
void intel_atomic_global_state_commit_done(struct intel_atomic_state *state);
int intel_atomic_global_state_wait_for_dependencies(struct intel_atomic_state *state);
bool intel_atomic_global_state_is_serialized(struct intel_atomic_state *state);
#endif

View file

@ -155,7 +155,7 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
const struct gmbus_pin *pins;
size_t size;
if (INTEL_PCH_TYPE(i915) >= PCH_LNL) {
if (INTEL_PCH_TYPE(i915) >= PCH_MTL) {
pins = gmbus_pins_mtp;
size = ARRAY_SIZE(gmbus_pins_mtp);
} else if (INTEL_PCH_TYPE(i915) >= PCH_DG2) {
@ -164,9 +164,6 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
} else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
pins = gmbus_pins_dg1;
size = ARRAY_SIZE(gmbus_pins_dg1);
} else if (INTEL_PCH_TYPE(i915) >= PCH_MTP) {
pins = gmbus_pins_mtp;
size = ARRAY_SIZE(gmbus_pins_mtp);
} else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
pins = gmbus_pins_icp;
size = ARRAY_SIZE(gmbus_pins_icp);

View file

@ -347,7 +347,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915,
default:
drm_err(&i915->drm, "Unknown transcoder %d\n",
cpu_transcoder);
return -EINVAL;
return 0;
}
}
@ -364,7 +364,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915,
return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
default:
drm_err(&i915->drm, "Unknown port %d\n", port);
return -EINVAL;
return 0;
}
}
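Returning 0 rather than -EINVAL matters because the function's return type is u32: a negative errno would be folded into an all-bits-set value and written to the repeater ctl register. A standalone demonstration of that folding, runnable outside the kernel:

/* Sketch, not part of the commit: what -EINVAL looks like as a u32. */
#include <stdio.h>

int main(void)
{
	unsigned int repeater_ctl = -22;	/* -EINVAL assigned to a u32 */

	/* Prints 0xffffffea: every high bit set, a bogus register value.
	 * Returning 0 instead programs no repeater bits, which is safe. */
	printf("0x%x\n", repeater_ctl);
	return 0;
}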
@ -853,8 +853,8 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (shim->stream_encryption) {
ret = shim->stream_encryption(connector, true);
if (ret) {
drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
connector->base.name, connector->base.base.id);
drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n",
connector->base.base.id, connector->base.name);
return ret;
}
drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
@ -878,14 +878,14 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
u32 repeater_ctl;
int ret;
drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being disabled...\n",
connector->base.name, connector->base.base.id);
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n",
connector->base.base.id, connector->base.name);
if (hdcp->shim->stream_encryption) {
ret = hdcp->shim->stream_encryption(connector, false);
if (ret) {
drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
connector->base.name, connector->base.base.id);
drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n",
connector->base.base.id, connector->base.name);
return ret;
}
drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
@ -929,8 +929,8 @@ static int intel_hdcp1_enable(struct intel_connector *connector)
struct intel_hdcp *hdcp = &connector->hdcp;
int i, ret, tries = 3;
drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being enabled...\n",
connector->base.name, connector->base.base.id);
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n",
connector->base.base.id, connector->base.name);
if (!hdcp_key_loadable(i915)) {
drm_err(&i915->drm, "HDCP key Load is not possible\n");
@ -1027,8 +1027,8 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
if (drm_WARN_ON(&i915->drm,
!intel_hdcp_in_use(i915, cpu_transcoder, port))) {
drm_err(&i915->drm,
"%s:%d HDCP link stopped encryption,%x\n",
connector->base.name, connector->base.base.id,
"[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n",
connector->base.base.id, connector->base.name,
intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
ret = -ENXIO;
intel_hdcp_update_value(connector,
@ -1046,8 +1046,8 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
}
drm_dbg_kms(&i915->drm,
"[%s:%d] HDCP link failed, retrying authentication\n",
connector->base.name, connector->base.base.id);
"[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n",
connector->base.base.id, connector->base.name);
ret = _intel_hdcp_disable(connector);
if (ret) {
@ -1633,6 +1633,12 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);
if (!dig_port->hdcp_mst_type1_capable && hdcp->content_type) {
drm_dbg_kms(&i915->drm,
"HDCP1.x or 2.0 Legacy Device Downstream\n");
return -EINVAL;
}
/* Converting and Storing the seq_num_v to local variable as DWORD */
seq_num_v =
drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
@ -1731,8 +1737,8 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
if (!(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS)) {
drm_err(&i915->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
connector->base.name, connector->base.base.id);
drm_err(&i915->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n",
connector->base.base.id, connector->base.name);
ret = -EPERM;
goto link_recover;
}
@ -1740,8 +1746,8 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
if (hdcp->shim->stream_2_2_encryption) {
ret = hdcp->shim->stream_2_2_encryption(connector, true);
if (ret) {
drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
connector->base.name, connector->base.base.id);
drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n",
connector->base.base.id, connector->base.name);
return ret;
}
drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
@ -1925,8 +1931,8 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
connector->base.name, connector->base.base.id,
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n",
connector->base.base.id, connector->base.name,
hdcp->content_type);
ret = hdcp2_authenticate_and_encrypt(connector);
@ -1936,8 +1942,8 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
return ret;
}
drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
connector->base.name, connector->base.base.id,
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n",
connector->base.base.id, connector->base.name,
hdcp->content_type);
hdcp->hdcp2_encrypted = true;
@ -1953,14 +1959,14 @@ _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
connector->base.name, connector->base.base.id);
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n",
connector->base.base.id, connector->base.name);
if (hdcp->shim->stream_2_2_encryption) {
ret = hdcp->shim->stream_2_2_encryption(connector, false);
if (ret) {
drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
connector->base.name, connector->base.base.id);
drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n",
connector->base.base.id, connector->base.name);
return ret;
}
drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
@ -2040,20 +2046,20 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
drm_dbg_kms(&i915->drm,
"[%s:%d] Repeater topology auth failed.(%d)\n",
connector->base.name, connector->base.base.id,
"[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
connector->base.base.id, connector->base.name,
ret);
} else {
drm_dbg_kms(&i915->drm,
"[%s:%d] HDCP2.2 link failed, retrying auth\n",
connector->base.name, connector->base.base.id);
"[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
connector->base.base.id, connector->base.name);
}
ret = _intel_hdcp2_disable(connector, true);
if (ret) {
drm_err(&i915->drm,
"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
connector->base.name, connector->base.base.id, ret);
"[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n",
connector->base.base.id, connector->base.name, ret);
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
goto out;
@ -2062,8 +2068,8 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
ret = _intel_hdcp2_enable(connector);
if (ret) {
drm_dbg_kms(&i915->drm,
"[%s:%d] Failed to enable hdcp2.2 (%d)\n",
connector->base.name, connector->base.base.id,
"[CONNECTOR:%d:%s] Failed to enable hdcp2.2 (%d)\n",
connector->base.base.id, connector->base.name,
ret);
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED,
@ -2341,8 +2347,8 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
return -ENOENT;
if (!connector->encoder) {
drm_err(&i915->drm, "[%s:%d] encoder is not initialized\n",
connector->base.name, connector->base.base.id);
drm_err(&i915->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n",
connector->base.base.id, connector->base.name);
return -ENODEV;
}

View file

@ -65,7 +65,7 @@ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
goto out_unmap;
}
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
goto out_unmap;

View file

@ -8,6 +8,8 @@
#include "intel_display_reg_defs.h"
#define TRANS_HDCP(__i915) (DISPLAY_VER(__i915) >= 12)
/* HDCP Key Registers */
#define HDCP_KEY_CONF _MMIO(0x66c00)
#define HDCP_AKSV_SEND_TRIGGER REG_BIT(31)
@ -82,7 +84,7 @@
#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \
_TRANSB_HDCP_CONF)
#define HDCP_CONF(dev_priv, trans, port) \
(GRAPHICS_VER(dev_priv) >= 12 ? \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_CONF(trans) : \
PORT_HDCP_CONF(port))
@ -95,7 +97,7 @@
_TRANSA_HDCP_ANINIT, \
_TRANSB_HDCP_ANINIT)
#define HDCP_ANINIT(dev_priv, trans, port) \
(GRAPHICS_VER(dev_priv) >= 12 ? \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_ANINIT(trans) : \
PORT_HDCP_ANINIT(port))
@ -105,7 +107,7 @@
#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \
_TRANSB_HDCP_ANLO)
#define HDCP_ANLO(dev_priv, trans, port) \
(GRAPHICS_VER(dev_priv) >= 12 ? \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_ANLO(trans) : \
PORT_HDCP_ANLO(port))
@ -115,7 +117,7 @@
#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \
_TRANSB_HDCP_ANHI)
#define HDCP_ANHI(dev_priv, trans, port) \
(GRAPHICS_VER(dev_priv) >= 12 ? \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_ANHI(trans) : \
PORT_HDCP_ANHI(port))
@ -126,7 +128,7 @@
_TRANSA_HDCP_BKSVLO, \
_TRANSB_HDCP_BKSVLO)
#define HDCP_BKSVLO(dev_priv, trans, port) \
(GRAPHICS_VER(dev_priv) >= 12 ? \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_BKSVLO(trans) : \
PORT_HDCP_BKSVLO(port))
@ -137,7 +139,7 @@
_TRANSA_HDCP_BKSVHI, \
_TRANSB_HDCP_BKSVHI)
#define HDCP_BKSVHI(dev_priv, trans, port) \
(GRAPHICS_VER(dev_priv) >= 12 ? \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_BKSVHI(trans) : \
PORT_HDCP_BKSVHI(port))
@ -148,7 +150,7 @@
_TRANSA_HDCP_RPRIME, \
_TRANSB_HDCP_RPRIME)
#define HDCP_RPRIME(dev_priv, trans, port) \
(GRAPHICS_VER(dev_priv) >= 12 ? \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_RPRIME(trans) : \
PORT_HDCP_RPRIME(port))
@ -159,7 +161,7 @@
_TRANSA_HDCP_STATUS, \
_TRANSB_HDCP_STATUS)
#define HDCP_STATUS(dev_priv, trans, port) \
(GRAPHICS_VER(dev_priv) >= 12 ? \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP_STATUS(trans) : \
PORT_HDCP_STATUS(port))
@ -200,7 +202,7 @@
#define AUTH_FORCE_CLR_INPUTCTR REG_BIT(19)
#define AUTH_CLR_KEYS REG_BIT(18)
#define HDCP2_AUTH(dev_priv, trans, port) \
(GRAPHICS_VER(dev_priv) >= 12 ? \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_AUTH(trans) : \
PORT_HDCP2_AUTH(port))
@ -211,7 +213,7 @@
_TRANSB_HDCP2_CTL)
#define CTL_LINK_ENCRYPTION_REQ REG_BIT(31)
#define HDCP2_CTL(dev_priv, trans, port) \
(GRAPHICS_VER(dev_priv) >= 12 ? \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_CTL(trans) : \
PORT_HDCP2_CTL(port))
@ -225,7 +227,7 @@
#define LINK_AUTH_STATUS REG_BIT(21)
#define LINK_ENCRYPTION_STATUS REG_BIT(20)
#define HDCP2_STATUS(dev_priv, trans, port) \
(GRAPHICS_VER(dev_priv) >= 12 ? \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_STATUS(trans) : \
PORT_HDCP2_STATUS(port))
@ -247,7 +249,7 @@
#define STREAM_ENCRYPTION_STATUS REG_BIT(31)
#define STREAM_TYPE_STATUS REG_BIT(30)
#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
(GRAPHICS_VER(dev_priv) >= 12 ? \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_STREAM_STATUS(trans) : \
PIPE_HDCP2_STREAM_STATUS(pipe))
@ -263,7 +265,7 @@
_TRANSB_HDCP2_AUTH_STREAM)
#define AUTH_STREAM_TYPE REG_BIT(31)
#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
(GRAPHICS_VER(dev_priv) >= 12 ? \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_AUTH_STREAM(trans) : \
PORT_HDCP2_AUTH_STREAM(port))

View file

@ -49,6 +49,7 @@
#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_gmbus.h"
@ -523,10 +524,12 @@ void hsw_write_infoframe(struct intel_encoder *encoder,
0);
/* Wa_14013475917 */
if (IS_DISPLAY_VER(dev_priv, 13, 14) && crtc_state->has_psr && type == DP_SDP_VSC)
return;
if (!(IS_DISPLAY_VER(dev_priv, 13, 14) && crtc_state->has_psr && type == DP_SDP_VSC))
val |= hsw_infoframe_enable(type);
if (type == DP_SDP_VSC)
val |= VSC_DIP_HW_DATA_SW_HEA;
val |= hsw_infoframe_enable(type);
intel_de_write(dev_priv, ctl_reg, val);
intel_de_posting_read(dev_priv, ctl_reg);
}
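Note the shape of the fix: instead of returning early (and skipping the VSC select bits entirely), the workaround now withholds only the DIP enable bit. A hypothetical sketch of the resulting bit selection, where wa_active stands for the display 13/14 + PSR + VSC condition above:

/* Sketch, not part of the commit: VSC control bits after the fix. */
static u32 vsc_ctl_bits_sketch(u32 val, bool wa_active)
{
	if (!wa_active)
		val |= hsw_infoframe_enable(DP_SDP_VSC);

	/* Per the bit name: hardware supplies the data, software the
	 * header; this is set even while the workaround is active. */
	val |= VSC_DIP_HW_DATA_SW_HEA;

	return val;
}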
@ -2503,6 +2506,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
if (!intel_display_device_enabled(dev_priv))
return connector_status_disconnected;
if (!intel_display_driver_check_access(dev_priv))
return connector->status;
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
if (DISPLAY_VER(dev_priv) >= 11 &&
@ -2531,6 +2537,9 @@ intel_hdmi_force(struct drm_connector *connector)
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
if (!intel_display_driver_check_access(i915))
return;
intel_hdmi_unset_edid(connector);
if (connector->status != connector_status_connected)
@ -3015,6 +3024,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
connector->ycbcr_420_allowed = true;
intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_connector->base.polled = intel_connector->polled;
if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;

View file

@ -177,6 +177,46 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
return storm;
}
static bool detection_work_enabled(struct drm_i915_private *i915)
{
lockdep_assert_held(&i915->irq_lock);
return i915->display.hotplug.detection_work_enabled;
}
static bool
mod_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
{
lockdep_assert_held(&i915->irq_lock);
if (!detection_work_enabled(i915))
return false;
return mod_delayed_work(i915->unordered_wq, work, delay);
}
static bool
queue_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
{
lockdep_assert_held(&i915->irq_lock);
if (!detection_work_enabled(i915))
return false;
return queue_delayed_work(i915->unordered_wq, work, delay);
}
static bool
queue_detection_work(struct drm_i915_private *i915, struct work_struct *work)
{
lockdep_assert_held(&i915->irq_lock);
if (!detection_work_enabled(i915))
return false;
return queue_work(i915->unordered_wq, work);
}
static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
@ -213,9 +253,9 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
drm_kms_helper_poll_reschedule(&dev_priv->drm);
mod_delayed_work(dev_priv->unordered_wq,
&dev_priv->display.hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
mod_delayed_detection_work(dev_priv,
&dev_priv->display.hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
}
@ -348,9 +388,9 @@ static void i915_digport_work_func(struct work_struct *work)
if (old_bits) {
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->display.hotplug.event_bits |= old_bits;
queue_delayed_detection_work(dev_priv,
&dev_priv->display.hotplug.hotplug_work, 0);
spin_unlock_irq(&dev_priv->irq_lock);
queue_delayed_work(dev_priv->unordered_wq,
&dev_priv->display.hotplug.hotplug_work, 0);
}
}
@ -467,11 +507,11 @@ static void i915_hotplug_work_func(struct work_struct *work)
if (retry) {
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->display.hotplug.retry_bits |= retry;
spin_unlock_irq(&dev_priv->irq_lock);
mod_delayed_work(dev_priv->unordered_wq,
&dev_priv->display.hotplug.hotplug_work,
msecs_to_jiffies(HPD_RETRY_DELAY));
mod_delayed_detection_work(dev_priv,
&dev_priv->display.hotplug.hotplug_work,
msecs_to_jiffies(HPD_RETRY_DELAY));
spin_unlock_irq(&dev_priv->irq_lock);
}
}
@ -590,7 +630,6 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
*/
if (storm_detected)
intel_hpd_irq_setup(dev_priv);
spin_unlock(&dev_priv->irq_lock);
/*
* Our hotplug handler can grab modeset locks (by calling down into the
@ -601,8 +640,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (queue_dig)
queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
if (queue_hp)
queue_delayed_work(dev_priv->unordered_wq,
&dev_priv->display.hotplug.hotplug_work, 0);
queue_delayed_detection_work(dev_priv,
&dev_priv->display.hotplug.hotplug_work, 0);
spin_unlock(&dev_priv->irq_lock);
}
/**
@ -710,6 +751,8 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
cancel_work(&dev_priv->display.hotplug.poll_init_work);
}
spin_lock_irq(&dev_priv->irq_lock);
drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@ -718,6 +761,9 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (pin == HPD_NONE)
continue;
if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
continue;
connector->base.polled = connector->polled;
if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
@ -726,6 +772,8 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
}
drm_connector_list_iter_end(&conn_iter);
spin_unlock_irq(&dev_priv->irq_lock);
if (enabled)
drm_kms_helper_poll_reschedule(&dev_priv->drm);
@ -774,8 +822,10 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
* As well, there's no issue if we race here since we always reschedule
* this worker anyway
*/
queue_work(dev_priv->unordered_wq,
&dev_priv->display.hotplug.poll_init_work);
spin_lock_irq(&dev_priv->irq_lock);
queue_detection_work(dev_priv,
&dev_priv->display.hotplug.poll_init_work);
spin_unlock_irq(&dev_priv->irq_lock);
}
/**
@ -803,8 +853,11 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
return;
WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
queue_work(dev_priv->unordered_wq,
&dev_priv->display.hotplug.poll_init_work);
spin_lock_irq(&dev_priv->irq_lock);
queue_detection_work(dev_priv,
&dev_priv->display.hotplug.poll_init_work);
spin_unlock_irq(&dev_priv->irq_lock);
}
void intel_hpd_init_early(struct drm_i915_private *i915)
@ -826,6 +879,20 @@ void intel_hpd_init_early(struct drm_i915_private *i915)
i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915);
}
static bool cancel_all_detection_work(struct drm_i915_private *i915)
{
bool was_pending = false;
if (cancel_delayed_work_sync(&i915->display.hotplug.hotplug_work))
was_pending = true;
if (cancel_work_sync(&i915->display.hotplug.poll_init_work))
was_pending = true;
if (cancel_delayed_work_sync(&i915->display.hotplug.reenable_work))
was_pending = true;
return was_pending;
}
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
if (!HAS_DISPLAY(dev_priv))
@ -841,9 +908,13 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work);
cancel_work_sync(&dev_priv->display.hotplug.poll_init_work);
cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work);
/*
* All other work triggered by hotplug events should be canceled by
* now.
*/
if (cancel_all_detection_work(dev_priv))
drm_dbg_kms(&dev_priv->drm, "Hotplug detection work still active\n");
}
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
@ -873,6 +944,62 @@ void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
spin_unlock_irq(&dev_priv->irq_lock);
}
static void queue_work_for_missed_irqs(struct drm_i915_private *i915)
{
bool queue_work = false;
enum hpd_pin pin;
lockdep_assert_held(&i915->irq_lock);
if (i915->display.hotplug.event_bits ||
i915->display.hotplug.retry_bits)
queue_work = true;
for_each_hpd_pin(pin) {
switch (i915->display.hotplug.stats[pin].state) {
case HPD_MARK_DISABLED:
queue_work = true;
break;
case HPD_ENABLED:
break;
default:
MISSING_CASE(i915->display.hotplug.stats[pin].state);
}
}
if (queue_work)
queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
}
void intel_hpd_enable_detection_work(struct drm_i915_private *i915)
{
spin_lock_irq(&i915->irq_lock);
i915->display.hotplug.detection_work_enabled = true;
queue_work_for_missed_irqs(i915);
spin_unlock_irq(&i915->irq_lock);
}
void intel_hpd_disable_detection_work(struct drm_i915_private *i915)
{
spin_lock_irq(&i915->irq_lock);
i915->display.hotplug.detection_work_enabled = false;
spin_unlock_irq(&i915->irq_lock);
cancel_all_detection_work(i915);
}
bool intel_hpd_schedule_detection(struct drm_i915_private *i915)
{
unsigned long flags;
bool ret;
spin_lock_irqsave(&i915->irq_lock, flags);
ret = queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
spin_unlock_irqrestore(&i915->irq_lock, flags);
return ret;
}
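These exports are the intended driver-facing surface: everything else funnels through the gated queue/mod helpers above, which refuse work while detection is disabled. A hypothetical sketch of how init and shutdown are expected to bracket detection:

/* Sketch, not part of the commit: bracketing HPD detection work. */
static void hpd_bringup_teardown_sketch(struct drm_i915_private *i915)
{
	/* Init done: allow detection and replay any missed HPD events. */
	intel_hpd_enable_detection_work(i915);

	/* ... normal operation: IRQs queue work via the gated helpers ... */

	/* Shutdown: block new detection work, then flush what's in flight. */
	intel_hpd_disable_detection_work(i915);
}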
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;

View file

@ -30,4 +30,8 @@ bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
void intel_hpd_debugfs_register(struct drm_i915_private *i915);
void intel_hpd_enable_detection_work(struct drm_i915_private *i915);
void intel_hpd_disable_detection_work(struct drm_i915_private *i915);
bool intel_hpd_schedule_detection(struct drm_i915_private *i915);
#endif /* __INTEL_HOTPLUG_H__ */

View file

@ -163,12 +163,10 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
(!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
return;
if (INTEL_PCH_TYPE(dev_priv) >= PCH_LNL)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL)
hpd->pch_hpd = hpd_mtp;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
hpd->pch_hpd = hpd_sde_dg1;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP)
hpd->pch_hpd = hpd_mtp;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
hpd->pch_hpd = hpd_icp;
else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
@ -1139,7 +1137,7 @@ static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915)
if (INTEL_PCH_TYPE(i915) >= PCH_LNL)
xe2lpd_sde_hpd_irq_setup(i915);
else if (INTEL_PCH_TYPE(i915) >= PCH_MTP)
else if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
mtp_hpd_irq_setup(i915);
}

View file

@ -250,11 +250,36 @@ struct opregion_asle_ext {
#define MAX_DSLP 1500
#define OPREGION_SIZE (8 * 1024)
struct intel_opregion {
struct drm_i915_private *i915;
struct opregion_header *header;
struct opregion_acpi *acpi;
struct opregion_swsci *swsci;
u32 swsci_gbda_sub_functions;
u32 swsci_sbcb_sub_functions;
struct opregion_asle *asle;
struct opregion_asle_ext *asle_ext;
void *rvda;
void *vbt_firmware;
const void *vbt;
u32 vbt_size;
struct work_struct asle_work;
struct notifier_block acpi_notifier;
};
static int check_swsci_function(struct drm_i915_private *i915, u32 function)
{
struct opregion_swsci *swsci = i915->display.opregion.swsci;
struct intel_opregion *opregion = i915->display.opregion;
struct opregion_swsci *swsci;
u32 main_function, sub_function;
if (!opregion)
return -ENODEV;
swsci = opregion->swsci;
if (!swsci)
return -ENODEV;
@ -265,11 +290,11 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function)
/* Check if we can call the function. See swsci_setup for details. */
if (main_function == SWSCI_SBCB) {
if ((i915->display.opregion.swsci_sbcb_sub_functions &
if ((opregion->swsci_sbcb_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
} else if (main_function == SWSCI_GBDA) {
if ((i915->display.opregion.swsci_gbda_sub_functions &
if ((opregion->swsci_gbda_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
}
@ -280,7 +305,7 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function)
static int swsci(struct drm_i915_private *dev_priv,
u32 function, u32 parm, u32 *parm_out)
{
struct opregion_swsci *swsci = dev_priv->display.opregion.swsci;
struct opregion_swsci *swsci;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u32 scic, dslp;
u16 swsci_val;
@ -290,6 +315,8 @@ static int swsci(struct drm_i915_private *dev_priv,
if (ret)
return ret;
swsci = dev_priv->display.opregion->swsci;
/* Driver sleep timeout in ms. */
dslp = swsci->dslp;
if (!dslp) {
@ -462,7 +489,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
struct opregion_asle *asle = dev_priv->display.opregion.asle;
struct opregion_asle *asle = dev_priv->display.opregion->asle;
drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp);
@ -584,9 +611,8 @@ static void asle_work(struct work_struct *work)
{
struct intel_opregion *opregion =
container_of(work, struct intel_opregion, asle_work);
struct drm_i915_private *dev_priv =
container_of(opregion, struct drm_i915_private, display.opregion);
struct opregion_asle *asle = dev_priv->display.opregion.asle;
struct drm_i915_private *dev_priv = opregion->i915;
struct opregion_asle *asle = opregion->asle;
u32 aslc_stat = 0;
u32 aslc_req;
@ -632,11 +658,17 @@ static void asle_work(struct work_struct *work)
asle->aslc = aslc_stat;
}
void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
bool intel_opregion_asle_present(struct drm_i915_private *i915)
{
if (dev_priv->display.opregion.asle)
queue_work(dev_priv->unordered_wq,
&dev_priv->display.opregion.asle_work);
return i915->display.opregion && i915->display.opregion->asle;
}
void intel_opregion_asle_intr(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = i915->display.opregion;
if (opregion && opregion->asle)
queue_work(i915->unordered_wq, &opregion->asle_work);
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
@ -692,7 +724,7 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->display.opregion;
struct intel_opregion *opregion = dev_priv->display.opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0, max_outputs;
@ -731,7 +763,7 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
static void intel_setup_cadls(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->display.opregion;
struct intel_opregion *opregion = dev_priv->display.opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0;
@ -761,7 +793,7 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
static void swsci_setup(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->display.opregion;
struct intel_opregion *opregion = dev_priv->display.opregion;
bool requested_callbacks = false;
u32 tmp;
@ -839,7 +871,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->display.opregion;
struct intel_opregion *opregion = dev_priv->display.opregion;
const struct firmware *fw = NULL;
const char *name = dev_priv->display.params.vbt_firmware;
int ret;
@ -879,7 +911,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
int intel_opregion_setup(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->display.opregion;
struct intel_opregion *opregion;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u32 asls, mboxes;
char buf[sizeof(OPREGION_SIGNATURE)];
@ -902,11 +934,20 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
return -ENOTSUPP;
}
opregion = kzalloc(sizeof(*opregion), GFP_KERNEL);
if (!opregion)
return -ENOMEM;
opregion->i915 = dev_priv;
dev_priv->display.opregion = opregion;
INIT_WORK(&opregion->asle_work, asle_work);
base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB);
if (!base)
return -ENOMEM;
if (!base) {
err = -ENOMEM;
goto err_memremap;
}
memcpy(buf, base, sizeof(buf));
@ -916,7 +957,6 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
goto err_out;
}
opregion->header = base;
opregion->lid_state = base + ACPI_CLID;
drm_dbg(&dev_priv->drm, "ACPI OpRegion version %u.%u.%u\n",
opregion->header->over.major,
@ -1034,6 +1074,10 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
err_out:
memunmap(base);
err_memremap:
kfree(opregion);
dev_priv->display.opregion = NULL;
return err;
}
@ -1106,12 +1150,12 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_con
{
struct drm_connector *connector = &intel_connector->base;
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_opregion *opregion = &i915->display.opregion;
struct intel_opregion *opregion = i915->display.opregion;
const struct drm_edid *drm_edid;
const void *edid;
int len;
if (!opregion->asle_ext)
if (!opregion || !opregion->asle_ext)
return NULL;
edid = opregion->asle_ext->bddc;
@ -1132,10 +1176,28 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_con
return drm_edid;
}
const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
{
struct intel_opregion *opregion = i915->display.opregion;
if (!opregion || !opregion->vbt)
return NULL;
if (size)
*size = opregion->vbt_size;
return opregion->vbt;
}
bool intel_opregion_headless_sku(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
struct opregion_header *header = opregion->header;
struct intel_opregion *opregion = i915->display.opregion;
struct opregion_header *header;
if (!opregion)
return false;
header = opregion->header;
if (!header || header->over.major < 2 ||
(header->over.major == 2 && header->over.minor < 3))
@ -1146,9 +1208,9 @@ bool intel_opregion_headless_sku(struct drm_i915_private *i915)
void intel_opregion_register(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
struct intel_opregion *opregion = i915->display.opregion;
if (!opregion->header)
if (!opregion)
return;
if (opregion->acpi) {
@ -1162,7 +1224,7 @@ void intel_opregion_register(struct drm_i915_private *i915)
static void intel_opregion_resume_display(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
struct intel_opregion *opregion = i915->display.opregion;
if (opregion->acpi) {
intel_didl_outputs(i915);
@ -1188,9 +1250,9 @@ static void intel_opregion_resume_display(struct drm_i915_private *i915)
void intel_opregion_resume(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
struct intel_opregion *opregion = i915->display.opregion;
if (!opregion->header)
if (!opregion)
return;
if (HAS_DISPLAY(i915))
@ -1201,12 +1263,12 @@ void intel_opregion_resume(struct drm_i915_private *i915)
static void intel_opregion_suspend_display(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
struct intel_opregion *opregion = i915->display.opregion;
if (opregion->asle)
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
cancel_work_sync(&i915->display.opregion.asle_work);
cancel_work_sync(&opregion->asle_work);
if (opregion->acpi)
opregion->acpi->drdy = 0;
@ -1214,9 +1276,9 @@ static void intel_opregion_suspend_display(struct drm_i915_private *i915)
void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
{
struct intel_opregion *opregion = &i915->display.opregion;
struct intel_opregion *opregion = i915->display.opregion;
if (!opregion->header)
if (!opregion)
return;
intel_opregion_notify_adapter(i915, state);
@ -1227,11 +1289,11 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
void intel_opregion_unregister(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
struct intel_opregion *opregion = i915->display.opregion;
intel_opregion_suspend(i915, PCI_D1);
if (!opregion->header)
if (!opregion)
return;
if (opregion->acpi_notifier.notifier_call) {
@ -1242,26 +1304,36 @@ void intel_opregion_unregister(struct drm_i915_private *i915)
void intel_opregion_cleanup(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
struct intel_opregion *opregion = i915->display.opregion;
if (!opregion->header)
if (!opregion)
return;
/* just clear all opregion memory pointers now */
memunmap(opregion->header);
if (opregion->rvda) {
if (opregion->rvda)
memunmap(opregion->rvda);
opregion->rvda = NULL;
}
if (opregion->vbt_firmware) {
kfree(opregion->vbt_firmware);
opregion->vbt_firmware = NULL;
}
opregion->header = NULL;
opregion->acpi = NULL;
opregion->swsci = NULL;
opregion->asle = NULL;
opregion->asle_ext = NULL;
opregion->vbt = NULL;
opregion->lid_state = NULL;
kfree(opregion->vbt_firmware);
kfree(opregion);
i915->display.opregion = NULL;
}
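Since struct intel_opregion is now defined only in this file, i915->display.opregion degrades to a nullable opaque pointer for the rest of the driver. A hypothetical sketch of the access pattern outside intel_opregion.c:

/* Sketch, not part of the commit: consuming the now-opaque opregion. */
static void opregion_consumer_sketch(struct drm_i915_private *i915)
{
	size_t vbt_size;
	const void *vbt;

	/* Fields are private; outside code goes through accessors, all of
	 * which tolerate a NULL i915->display.opregion. */
	vbt = intel_opregion_get_vbt(i915, &vbt_size);
	if (!vbt)
		return;	/* no opregion, or no VBT in it */

	/* ... parse the VBT ... */
}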
static int intel_opregion_show(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = m->private;
struct intel_opregion *opregion = i915->display.opregion;
if (opregion)
seq_write(m, opregion->header, OPREGION_SIZE);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(intel_opregion);
void intel_opregion_debugfs_register(struct drm_i915_private *i915)
{
struct drm_minor *minor = i915->drm.primary;
debugfs_create_file("i915_opregion", 0444, minor->debugfs_root,
i915, &intel_opregion_fops);
}

View file

@ -25,38 +25,13 @@
#ifndef _INTEL_OPREGION_H_
#define _INTEL_OPREGION_H_
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/types.h>
struct drm_i915_private;
struct intel_connector;
struct intel_encoder;
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
struct opregion_asle_ext;
struct intel_opregion {
struct opregion_header *header;
struct opregion_acpi *acpi;
struct opregion_swsci *swsci;
u32 swsci_gbda_sub_functions;
u32 swsci_sbcb_sub_functions;
struct opregion_asle *asle;
struct opregion_asle_ext *asle_ext;
void *rvda;
void *vbt_firmware;
const void *vbt;
u32 vbt_size;
u32 *lid_state;
struct work_struct asle_work;
struct notifier_block acpi_notifier;
};
#define OPREGION_SIZE (8 * 1024)
#ifdef CONFIG_ACPI
int intel_opregion_setup(struct drm_i915_private *dev_priv);
@ -69,6 +44,7 @@ void intel_opregion_resume(struct drm_i915_private *dev_priv);
void intel_opregion_suspend(struct drm_i915_private *dev_priv,
pci_power_t state);
bool intel_opregion_asle_present(struct drm_i915_private *i915);
void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable);
@ -77,8 +53,12 @@ int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector);
const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size);
bool intel_opregion_headless_sku(struct drm_i915_private *i915);
void intel_opregion_debugfs_register(struct drm_i915_private *i915);
#else /* CONFIG_ACPI*/
static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
@ -107,6 +87,11 @@ static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
{
}
static inline bool intel_opregion_asle_present(struct drm_i915_private *i915)
{
return false;
}
static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
}
@ -134,11 +119,21 @@ intel_opregion_get_edid(struct intel_connector *connector)
return NULL;
}
static inline const void *
intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
{
return NULL;
}
static inline bool intel_opregion_headless_sku(struct drm_i915_private *i915)
{
return false;
}
static inline void intel_opregion_debugfs_register(struct drm_i915_private *i915)
{
}
#endif /* CONFIG_ACPI */
#endif

View file

@ -37,6 +37,7 @@
#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_lvds_regs.h"
@ -683,6 +684,9 @@ intel_panel_detect(struct drm_connector *connector, bool force)
if (!intel_display_device_enabled(i915))
return connector_status_disconnected;
if (!intel_display_driver_check_access(i915))
return connector->status;
return connector_status_connected;
}

View file

@ -3,9 +3,11 @@
* Copyright © 2021 Intel Corporation
*/
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_fb.h"
@ -13,20 +15,21 @@
#include "intel_plane_initial.h"
static bool
intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
const struct intel_initial_plane_config *plane_config,
intel_reuse_initial_plane_obj(struct intel_crtc *this,
const struct intel_initial_plane_config plane_configs[],
struct drm_framebuffer **fb,
struct i915_vma **vma)
{
struct drm_i915_private *i915 = to_i915(this->base.dev);
struct intel_crtc *crtc;
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
if (!crtc_state->uapi.active)
continue;
@ -34,7 +37,7 @@ intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
if (!plane_state->ggtt_vma)
continue;
if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
if (plane_configs[this->pipe].base == plane_configs[crtc->pipe].base) {
*fb = plane_state->hw.fb;
*vma = plane_state->ggtt_vma;
return true;
@ -44,12 +47,100 @@ intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
return false;
}
static bool
initial_plane_phys_lmem(struct drm_i915_private *i915,
struct intel_initial_plane_config *plane_config)
{
gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
struct intel_memory_region *mem;
dma_addr_t dma_addr;
gen8_pte_t pte;
u32 base;
base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
gte += base / I915_GTT_PAGE_SIZE;
pte = ioread64(gte);
if (!(pte & GEN12_GGTT_PTE_LM)) {
drm_err(&i915->drm,
"Initial plane programming missing PTE_LM bit\n");
return false;
}
dma_addr = pte & GEN12_GGTT_PTE_ADDR_MASK;
if (IS_DGFX(i915))
mem = i915->mm.regions[INTEL_REGION_LMEM_0];
else
mem = i915->mm.stolen_region;
if (!mem) {
drm_dbg_kms(&i915->drm,
"Initial plane memory region not initialized\n");
return false;
}
/*
* On lmem we don't currently expect this to
* ever be placed in the stolen portion.
*/
if (dma_addr < mem->region.start || dma_addr > mem->region.end) {
drm_err(&i915->drm,
"Initial plane programming using invalid range, dma_addr=%pa (%s [%pa-%pa])\n",
&dma_addr, mem->region.name, &mem->region.start, &mem->region.end);
return false;
}
drm_dbg(&i915->drm,
"Using dma_addr=%pa, based on initial plane programming\n",
&dma_addr);
plane_config->phys_base = dma_addr - mem->region.start;
plane_config->mem = mem;
return true;
}
static bool
initial_plane_phys_smem(struct drm_i915_private *i915,
struct intel_initial_plane_config *plane_config)
{
struct intel_memory_region *mem;
u32 base;
base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
mem = i915->mm.stolen_region;
if (!mem) {
drm_dbg_kms(&i915->drm,
"Initial plane memory region not initialized\n");
return false;
}
/* FIXME get and validate the dma_addr from the PTE */
plane_config->phys_base = base;
plane_config->mem = mem;
return true;
}
static bool
initial_plane_phys(struct drm_i915_private *i915,
struct intel_initial_plane_config *plane_config)
{
if (IS_DGFX(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
return initial_plane_phys_lmem(i915, plane_config);
else
return initial_plane_phys_smem(i915, plane_config);
}
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
struct intel_initial_plane_config *plane_config)
{
struct intel_memory_region *mem;
struct drm_i915_gem_object *obj;
struct drm_mm_node orig_mm = {};
struct i915_vma *vma;
resource_size_t phys_base;
u32 base, size;
@ -58,45 +149,13 @@ initial_plane_vma(struct drm_i915_private *i915,
if (plane_config->size == 0)
return NULL;
base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
if (IS_DGFX(i915)) {
gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
gen8_pte_t pte;
gte += base / I915_GTT_PAGE_SIZE;
pte = ioread64(gte);
if (!(pte & GEN12_GGTT_PTE_LM)) {
drm_err(&i915->drm,
"Initial plane programming missing PTE_LM bit\n");
return NULL;
}
phys_base = pte & I915_GTT_PAGE_MASK;
mem = i915->mm.regions[INTEL_REGION_LMEM_0];
/*
* We don't currently expect this to ever be placed in the
* stolen portion.
*/
if (phys_base >= resource_size(&mem->region)) {
drm_err(&i915->drm,
"Initial plane programming using invalid range, phys_base=%pa\n",
&phys_base);
return NULL;
}
drm_dbg(&i915->drm,
"Using phys_base=%pa, based on initial plane programming\n",
&phys_base);
} else {
phys_base = base;
mem = i915->mm.stolen_region;
}
if (!mem)
if (!initial_plane_phys(i915, plane_config))
return NULL;
phys_base = plane_config->phys_base;
mem = plane_config->mem;
base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
size = round_up(plane_config->base + plane_config->size,
mem->min_page_size);
size -= base;
@ -108,14 +167,19 @@ initial_plane_vma(struct drm_i915_private *i915,
*/
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
mem == i915->mm.stolen_region &&
size * 2 > i915->dsm.usable_size)
size * 2 > i915->dsm.usable_size) {
drm_dbg_kms(&i915->drm, "Initial FB size exceeds half of stolen, discarding\n");
return NULL;
}
obj = i915_gem_object_create_region_at(mem, phys_base, size,
I915_BO_ALLOC_USER |
I915_BO_PREALLOC);
if (IS_ERR(obj))
if (IS_ERR(obj)) {
drm_dbg_kms(&i915->drm, "Failed to preallocate initial FB in %s\n",
mem->region.name);
return NULL;
}
/*
* Mark it WT ahead of time to avoid changing the
@ -139,23 +203,66 @@ initial_plane_vma(struct drm_i915_private *i915,
goto err_obj;
}
/*
* MTL GOP likes to place the framebuffer high up in ggtt,
* which can cause problems for ggtt_reserve_guc_top().
* Try to pin it to a low ggtt address instead to avoid that.
*/
base = 0;
if (base != plane_config->base) {
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
int ret;
/*
* Make sure the original and new locations
* can't overlap. That would corrupt the original
* PTEs which are still being used for scanout.
*/
ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &orig_mm,
size, plane_config->base,
I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
if (ret)
goto err_obj;
}
vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
if (IS_ERR(vma))
goto err_obj;
retry:
pinctl = PIN_GLOBAL | PIN_OFFSET_FIXED | base;
if (HAS_GMCH(i915))
if (!i915_gem_object_is_lmem(obj))
pinctl |= PIN_MAPPABLE;
if (i915_vma_pin(vma, 0, 0, pinctl))
if (i915_vma_pin(vma, 0, 0, pinctl)) {
if (drm_mm_node_allocated(&orig_mm)) {
drm_mm_remove_node(&orig_mm);
/*
* Try again, but this time pin
* it to its original location.
*/
base = plane_config->base;
goto retry;
}
goto err_obj;
}
if (i915_gem_object_is_tiled(obj) &&
!i915_vma_is_map_and_fenceable(vma))
goto err_obj;
if (drm_mm_node_allocated(&orig_mm))
drm_mm_remove_node(&orig_mm);
drm_dbg_kms(&i915->drm,
"Initial plane fb bound to 0x%x in the ggtt (original 0x%x)\n",
i915_ggtt_offset(vma), plane_config->base);
return vma;
err_obj:
if (drm_mm_node_allocated(&orig_mm))
drm_mm_remove_node(&orig_mm);
i915_gem_object_put(obj);
return NULL;
}
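The relocation logic above boils down to: keep the firmware's original GGTT range reserved so the new pin can never overlap PTEs still used for scanout, prefer a low GGTT address, and fall back to the original placement if the low range is unavailable. A toy sketch of that decision, with a made-up allocator standing in for the real GGTT:

#include <stdbool.h>
#include <stdio.h>

/* pretend the bottom 64KiB of ggtt is already spoken for */
static bool range_free(unsigned int low, unsigned int high)
{
	return low >= 0x10000 || high <= 0;
}

static unsigned int pin_initial_fb(unsigned int orig_base, unsigned int size)
{
	/*
	 * The original range stays reserved either way, so a pin at the
	 * new offset cannot overlap the PTEs still used for scanout.
	 */
	if (range_free(0, size))
		return 0;		/* preferred: low ggtt address */

	return orig_base;		/* fallback: keep firmware placement */
}

int main(void)
{
	printf("fb pinned at %#x\n", pin_initial_fb(0xff000000u, 0x800000u));
	return 0;
}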
@ -210,10 +317,11 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
static void
intel_find_initial_plane_obj(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
struct intel_initial_plane_config plane_configs[])
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_initial_plane_config *plane_config =
&plane_configs[crtc->pipe];
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
@ -239,7 +347,7 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
* Failed to alloc the obj, check to see if we should share
* an fb with another CRTC instead
*/
if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
if (intel_reuse_initial_plane_obj(crtc, plane_configs, &fb, &vma))
goto valid_fb;
/*
@ -302,25 +410,36 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
i915_vma_put(plane_config->vma);
}
void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
void intel_initial_plane_config(struct drm_i915_private *i915)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_initial_plane_config plane_config = {};
struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
/*
* Note that reserving the BIOS fb up front prevents us
* from stuffing other stolen allocations like the ring
* on top. This prevents some ugliness at boot time, and
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
dev_priv->display.funcs.display->get_initial_plane_config(crtc, &plane_config);
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_initial_plane_config *plane_config =
&plane_configs[crtc->pipe];
/*
* If the fb is shared between multiple heads, we'll
* just get the first one.
*/
intel_find_initial_plane_obj(crtc, &plane_config);
if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
continue;
plane_config_fini(&plane_config);
/*
* Note that reserving the BIOS fb up front prevents us
* from stuffing other stolen allocations like the ring
* on top. This prevents some ugliness at boot time, and
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
i915->display.funcs.display->get_initial_plane_config(crtc, plane_config);
/*
* If the fb is shared between multiple heads, we'll
* just get the first one.
*/
intel_find_initial_plane_obj(crtc, plane_configs);
if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config))
intel_crtc_wait_for_next_vblank(crtc);
plane_config_fini(plane_config);
}
}

View file

@ -6,8 +6,8 @@
#ifndef __INTEL_PLANE_INITIAL_H__
#define __INTEL_PLANE_INITIAL_H__
struct intel_crtc;
struct drm_i915_private;
void intel_crtc_initial_plane_config(struct intel_crtc *crtc);
void intel_initial_plane_config(struct drm_i915_private *i915);
#endif

View file

@ -366,7 +366,7 @@ static bool intel_pps_is_valid(struct intel_dp *intel_dp)
if (intel_dp->pps.pps_idx == 1 &&
INTEL_PCH_TYPE(i915) >= PCH_ICP &&
INTEL_PCH_TYPE(i915) < PCH_MTP)
INTEL_PCH_TYPE(i915) <= PCH_ADP)
return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;

View file

@ -173,6 +173,12 @@
* irrelevant for normal operation.
*/
#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
(intel_dp)->psr.source_support)
#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
(intel_dp)->psr.source_panel_replay_support)
bool intel_encoder_can_psr(struct intel_encoder *encoder)
{
if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
@ -528,7 +534,7 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp)
intel_dp_get_sink_sync_latency(intel_dp);
if (DISPLAY_VER(i915) >= 9 &&
intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
bool y_req = intel_dp->psr_dpcd[1] &
DP_PSR2_SU_Y_COORDINATE_REQUIRED;
bool alpm = intel_dp_get_alpm_status(intel_dp);
@ -560,11 +566,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
if (intel_dp->psr_dpcd[0])
_psr_init_dpcd(intel_dp);
if (intel_dp->psr.sink_psr2_support) {
intel_dp->psr.colorimetry_support =
intel_dp_get_colorimetry_status(intel_dp);
if (intel_dp->psr.sink_psr2_support)
intel_dp_get_su_granularity(intel_dp);
}
}
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
@ -604,6 +607,18 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
aux_ctl);
}
static bool psr2_su_region_et_valid(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (DISPLAY_VER(i915) >= 20 &&
intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
!(intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE))
return true;
return false;
}
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@ -619,6 +634,8 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
if (psr2_su_region_et_valid(intel_dp))
dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
} else {
if (intel_dp->psr.link_standby)
dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
@ -762,8 +779,8 @@ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
static int psr2_block_count_lines(struct intel_dp *intel_dp)
{
return intel_dp->psr.io_wake_lines < 9 &&
intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
return intel_dp->psr.alpm_parameters.io_wake_lines < 9 &&
intel_dp->psr.alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
}
static int psr2_block_count(struct intel_dp *intel_dp)
@ -800,6 +817,7 @@ static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val = EDP_PSR2_ENABLE;
u32 psr_val = 0;
@ -841,17 +859,18 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
*/
int tmp;
tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
tmp = map[psr->alpm_parameters.io_wake_lines -
TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
tmp = map[psr->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
} else if (DISPLAY_VER(dev_priv) >= 12) {
val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
val |= TGL_EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
} else if (DISPLAY_VER(dev_priv) >= 9) {
val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
val |= EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
val |= EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
}
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
@ -869,6 +888,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
}
if (psr2_su_region_et_valid(intel_dp))
val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
/*
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and Bspec
* recommends keeping this bit unset while PSR2 is enabled.
@ -1031,6 +1053,9 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
return false;
}
if (psr2_su_region_et_valid(intel_dp))
crtc_state->enable_psr2_su_region_et = true;
return crtc_state->enable_psr2_sel_fetch = true;
}
@ -1101,8 +1126,32 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
return true;
}
static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int check_entry_lines;
if (DISPLAY_VER(i915) < 20)
return true;
/* ALPM Entry Check = 2 + CEILING( 5us /tline ) */
check_entry_lines = 2 +
intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 5);
if (check_entry_lines > 15)
return false;
if (i915->display.params.psr_safest_params)
check_entry_lines = 15;
intel_dp->psr.alpm_parameters.check_entry_lines = check_entry_lines;
return true;
}
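The entry-check formula above is easy to verify by hand. A stand-alone sketch, assuming intel_usecs_to_scanlines() is the usual round-up conversion of microseconds to scanlines (usecs * pixel_clock_khz / (1000 * htotal), rounded up):

#include <stdio.h>

static int usecs_to_scanlines(int usecs, int clock_khz, int htotal)
{
	/* DIV_ROUND_UP(usecs * clock, 1000 * htotal), assumed semantics */
	return (usecs * clock_khz + 1000 * htotal - 1) / (1000 * htotal);
}

int main(void)
{
	/* e.g. a 4k@60 mode: 533250 kHz pixel clock, htotal 4400 */
	int check_entry_lines = 2 + usecs_to_scanlines(5, 533250, 4400);

	/* tline is ~8.25us, so CEILING(5us / tline) = 1 and the result is 3 */
	printf("ALPM entry check = %d lines\n", check_entry_lines);
	return 0;
}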
static bool _compute_alpm_params(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
@ -1115,6 +1164,8 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
* it is not enough -> use 45 us.
*/
fast_wake_time = 45;
/* TODO: Check how we can use ALPM_CTL fast wake extended field */
max_wake_lines = 12;
} else {
io_wake_time = 50;
@ -1131,12 +1182,15 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
fast_wake_lines > max_wake_lines)
return false;
if (!_lnl_compute_alpm_params(intel_dp, crtc_state))
return false;
if (i915->display.params.psr_safest_params)
io_wake_lines = fast_wake_lines = max_wake_lines;
/* According to Bspec, the lower limit should be set to 7 lines. */
intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
intel_dp->psr.alpm_parameters.io_wake_lines = max(io_wake_lines, 7);
intel_dp->psr.alpm_parameters.fast_wake_lines = max(fast_wake_lines, 7);
return true;
}
@ -1268,7 +1322,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
if (!_compute_alpm_params(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, Unable to use long enough wake times\n");
return false;
@ -1377,10 +1431,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
&crtc_state->psr_vsc);
}
void intel_psr_get_config(struct intel_encoder *encoder,
@ -1504,6 +1554,21 @@ static void wm_optimization_wa(struct intel_dp *intel_dp,
wa_16013835468_bit_get(intel_dp), 0);
}
static void lnl_alpm_configure(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct intel_psr *psr = &intel_dp->psr;
if (DISPLAY_VER(dev_priv) < 20)
return;
intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder),
ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines) |
ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines));
}
static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@ -1569,6 +1634,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
lnl_alpm_configure(intel_dp);
/*
* Wa_16013835468
* Wa_14015648006
@ -1634,7 +1701,6 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
struct intel_encoder *encoder = &dig_port->base;
u32 val;
drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
@ -1662,7 +1728,6 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
intel_dp->psr.psr2_enabled ? "2" : "1");
intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
intel_psr_enable_sink(intel_dp);
intel_psr_enable_source(intel_dp, crtc_state);
@ -1951,7 +2016,7 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
struct drm_rect *clip, bool full_update)
bool full_update)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@ -1966,17 +2031,21 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
goto exit;
}
if (clip->y1 == -1)
if (crtc_state->psr2_su_area.y1 == -1)
goto exit;
if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
} else {
drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
drm_WARN_ON(crtc_state->uapi.crtc->dev,
crtc_state->psr2_su_area.y1 % 4 ||
crtc_state->psr2_su_area.y2 % 4);
val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
crtc_state->psr2_su_area.y1 / 4 + 1);
val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
crtc_state->psr2_su_area.y2 / 4 + 1);
}
exit:
crtc_state->psr2_man_track_ctl = val;
@ -2002,8 +2071,7 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
overlap_damage_area->y2 = damage_area->y2;
}
static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
struct drm_rect *pipe_clip)
static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
@ -2016,9 +2084,32 @@ static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *c
else
y_alignment = crtc_state->su_y_granularity;
pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
if (pipe_clip->y2 % y_alignment)
pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
if (crtc_state->psr2_su_area.y2 % y_alignment)
crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
y_alignment) + 1) * y_alignment;
}
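A worked example of the alignment above: with a 4-line SU granularity, a dirty range of y1 = 10, y2 = 21 rounds out to [8, 24):

#include <stdio.h>

int main(void)
{
	int y_alignment = 4, y1 = 10, y2 = 21;

	y1 -= y1 % y_alignment;				/* round start down */
	if (y2 % y_alignment)
		y2 = (y2 / y_alignment + 1) * y_alignment; /* round end up */

	printf("aligned SU area: [%d, %d)\n", y1, y2);	/* prints [8, 24) */
	return 0;
}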
/*
* When early transport is in use we need to extend the SU area to
* cover the cursor fully when the cursor is in the SU area.
*/
static void
intel_psr2_sel_fetch_et_alignment(struct intel_crtc_state *crtc_state,
struct intel_plane_state *cursor_state)
{
struct drm_rect inter;
if (!crtc_state->enable_psr2_su_region_et ||
!cursor_state->uapi.visible)
return;
inter = crtc_state->psr2_su_area;
if (!drm_rect_intersect(&inter, &cursor_state->uapi.dst))
return;
clip_area_update(&crtc_state->psr2_su_area, &cursor_state->uapi.dst,
&crtc_state->pipe_src);
}
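The extension above is plain rectangle arithmetic: if the cursor overlaps the SU area at all, the area grows to contain the whole cursor. A sketch with a plain struct standing in for drm_rect:

#include <stdio.h>

struct rect { int y1, y2; };

static int overlaps(struct rect a, struct rect b)
{
	return a.y1 < b.y2 && b.y1 < a.y2;
}

int main(void)
{
	struct rect su = { .y1 = 100, .y2 = 200 };
	struct rect cursor = { .y1 = 190, .y2 = 254 };	/* 64-line cursor */

	if (overlaps(su, cursor)) {
		if (cursor.y1 < su.y1)
			su.y1 = cursor.y1;
		if (cursor.y2 > su.y2)
			su.y2 = cursor.y2;
	}

	printf("SU area: [%d, %d)\n", su.y1, su.y2);	/* prints [100, 254) */
	return 0;
}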
/*
@ -2061,8 +2152,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
struct intel_plane_state *new_plane_state, *old_plane_state;
struct intel_plane_state *new_plane_state, *old_plane_state,
*cursor_plane_state = NULL;
struct intel_plane *plane;
bool full_update = false;
int i, ret;
@ -2075,6 +2166,11 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
goto skip_sel_fetch_set_loop;
}
crtc_state->psr2_su_area.x1 = 0;
crtc_state->psr2_su_area.y1 = -1;
crtc_state->psr2_su_area.x2 = INT_MAX;
crtc_state->psr2_su_area.y2 = -1;
/*
* Calculate minimal selective fetch area of each plane and calculate
* the pipe damaged area.
@ -2109,14 +2205,14 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (old_plane_state->uapi.visible) {
damaged_area.y1 = old_plane_state->uapi.dst.y1;
damaged_area.y2 = old_plane_state->uapi.dst.y2;
clip_area_update(&pipe_clip, &damaged_area,
clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
&crtc_state->pipe_src);
}
if (new_plane_state->uapi.visible) {
damaged_area.y1 = new_plane_state->uapi.dst.y1;
damaged_area.y2 = new_plane_state->uapi.dst.y2;
clip_area_update(&pipe_clip, &damaged_area,
clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
&crtc_state->pipe_src);
}
continue;
@ -2124,7 +2220,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
/* If alpha changed mark the whole plane area as damaged */
damaged_area.y1 = new_plane_state->uapi.dst.y1;
damaged_area.y2 = new_plane_state->uapi.dst.y2;
clip_area_update(&pipe_clip, &damaged_area,
clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
&crtc_state->pipe_src);
continue;
}
@ -2141,7 +2237,14 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
/*
* The new cursor plane state is stored to adjust the SU area to
* cover the cursor fully.
*/
if (plane->id == PLANE_CURSOR)
cursor_plane_state = new_plane_state;
}
/*
@ -2150,7 +2253,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
* should identify cases where this happens and fix the area
* calculation for those.
*/
if (pipe_clip.y1 == -1) {
if (crtc_state->psr2_su_area.y1 == -1) {
drm_info_once(&dev_priv->drm,
"Selective fetch area calculation failed in pipe %c\n",
pipe_name(crtc->pipe));
@ -2164,13 +2267,17 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
crtc_state->splitter.enable)
pipe_clip.y1 = 0;
crtc_state->psr2_su_area.y1 = 0;
ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
if (ret)
return ret;
intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
/* Adjust su area to cover cursor fully as necessary */
if (cursor_plane_state)
intel_psr2_sel_fetch_et_alignment(crtc_state, cursor_plane_state);
intel_psr2_sel_fetch_pipe_alignment(crtc_state);
/*
* Now that we have the pipe damaged area, check if it intersects with
@ -2185,7 +2292,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
!new_plane_state->uapi.visible)
continue;
inter = pipe_clip;
inter = crtc_state->psr2_su_area;
sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
sel_fetch_area->y1 = -1;
@ -2230,7 +2337,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
}
skip_sel_fetch_set_loop:
psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
psr2_man_trk_ctl_calc(crtc_state, full_update);
return 0;
}
@ -2799,6 +2906,9 @@ void intel_psr_init(struct intel_dp *intel_dp)
else
intel_dp->psr.source_support = true;
/* Disable early transport for now */
intel_dp->psr.debug |= I915_PSR_DEBUG_SU_REGION_ET_DISABLE;
/* Set link_standby x link_off defaults */
if (DISPLAY_VER(dev_priv) < 12)
/* For new platforms up to TGL let's respect VBT back again */

View file

@ -21,12 +21,6 @@ struct intel_encoder;
struct intel_plane;
struct intel_plane_state;
#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
(intel_dp)->psr.source_support)
#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
(intel_dp)->psr.source_panel_replay_support)
bool intel_encoder_can_psr(struct intel_encoder *encoder);
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_pre_plane_update(struct intel_atomic_state *state,

View file

@ -159,6 +159,7 @@
#define TGL_EDP_PSR2_BLOCK_COUNT_MASK REG_BIT(28)
#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 REG_FIELD_PREP(TGL_EDP_PSR2_BLOCK_COUNT_MASK, 0)
#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 REG_FIELD_PREP(TGL_EDP_PSR2_BLOCK_COUNT_MASK, 1)
#define LNL_EDP_PSR2_SU_REGION_ET_ENABLE REG_BIT(27)
#define EDP_Y_COORDINATE_ENABLE REG_BIT(25) /* display 10, 11 and 12 */
#define EDP_PSR2_SU_SDP_SCANLINE REG_BIT(25) /* display 13+ */
#define EDP_MAX_SU_DISABLE_TIME_MASK REG_GENMASK(24, 20)
@ -245,6 +246,11 @@
#define ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(14)
#define ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(13)
/* PSR2 Early transport */
#define _PIPE_SRCSZ_ERLY_TPT_A 0x70074
#define PIPE_SRCSZ_ERLY_TPT(trans) _MMIO_TRANS2(trans, _PIPE_SRCSZ_ERLY_TPT_A)
#define _SEL_FETCH_PLANE_BASE_1_A 0x70890
#define _SEL_FETCH_PLANE_BASE_2_A 0x708B0
#define _SEL_FETCH_PLANE_BASE_3_A 0x708D0
@ -290,4 +296,61 @@
_SEL_FETCH_PLANE_OFFSET_1_A - \
_SEL_FETCH_PLANE_BASE_1_A)
#define _ALPM_CTL_A 0x60950
#define ALPM_CTL(tran) _MMIO_TRANS2(tran, _ALPM_CTL_A)
#define ALPM_CTL_ALPM_ENABLE REG_BIT(31)
#define ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(30)
#define ALPM_CTL_LOBF_ENABLE REG_BIT(29)
#define ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE REG_BIT(28)
#define ALPM_CTL_KEEP_FEC_ENABLE_FOR_AUX_WAKE_SLEEP REG_BIT(27)
#define ALPM_CTL_RESTORE_OCCURED REG_BIT(26)
#define ALPM_CTL_RESTORE_TO_SLEEP REG_BIT(25)
#define ALPM_CTL_RESTORE_TO_DEEP_SLEEP REG_BIT(24)
#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK REG_GENMASK(23, 21)
#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS REG_FIELD_PREP(ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK, 0)
#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_128_SYMBOLS REG_FIELD_PREP(ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK, 1)
#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_256_SYMBOLS REG_FIELD_PREP(ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK, 2)
#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_512_SYMBOLS REG_FIELD_PREP(ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK, 3)
#define ALPM_CTL_AUX_WAKE_SLEEP_HOLD_ENABLE REG_BIT(20)
#define ALPM_CTL_ALPM_ENTRY_CHECK_MASK REG_GENMASK(19, 16)
#define ALPM_CTL_ALPM_ENTRY_CHECK(val) REG_FIELD_PREP(ALPM_CTL_ALPM_ENTRY_CHECK_MASK, val)
#define ALPM_CTL_EXTENDED_FAST_WAKE_TIME_MASK REG_GENMASK(13, 8)
#define ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES 5
#define ALPM_CTL_EXTENDED_FAST_WAKE_TIME(lines) REG_FIELD_PREP(ALPM_CTL_EXTENDED_FAST_WAKE_TIME_MASK, (lines) - ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES)
#define ALPM_CTL_AUX_LESS_WAKE_TIME_MASK REG_GENMASK(5, 0)
#define ALPM_CTL_AUX_LESS_WAKE_TIME(val) REG_FIELD_PREP(ALPM_CTL_AUX_LESS_WAKE_TIME_MASK, val)
#define _ALPM_CTL2_A 0x60954
#define ALPM_CTL2(tran) _MMIO_TRANS2(tran, _ALPM_CTL2_A)
#define ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY_MASK REG_GENMASK(28, 24)
#define ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY(val) REG_FIELD_PREP(ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY_MASK, val)
#define ALPM_CTL2_AUX_LESS_WAKE_TIME_EXTENSION_MASK REG_GENMASK(19, 16)
#define ALPM_CTL2_AUX_LESS_WAKE_TIME_EXTENSION(val) REG_FIELD_PREP(ALPM_CTL2_AUX_LESS_WAKE_TIME_EXTENSION_MASK, val)
#define ALPM_CTL2_NUMBER_OF_LTTPR_MASK REG_GENMASK(15, 12)
#define ALPM_CTL2_NUMBER_OF_LTTPR(val) REG_FIELD_PREP(ALPM_CTL2_NUMBER_OF_LTTPR_MASK, val)
#define ALPM_CTL2_LTTPR_AUX_LESS_SLEEP_HOLD_TIME_MASK REG_GENMASK(10, 8)
#define ALPM_CTL2_LTTPR_AUX_LESS_SLEEP_HOLD_TIME(val) REG_FIELD_PREP(ALPM_CTL2_LTTPR_AUX_LESS_SLEEP_HOLD_TIME_MASK, val)
#define ALPM_CTL2_FEC_DECODE_EN_POSITION_AFTER_WAKE_SR REG_BIT(4)
#define ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES_MASK REG_GENMASK(2, 0)
#define ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES(val) REG_FIELD_PREP(ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES_MASK, val)
#define _PORT_ALPM_CTL_A 0x16fa2c
#define PORT_ALPM_CTL(tran) _MMIO_TRANS2(tran, _PORT_ALPM_CTL_A)
#define PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(31)
#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK REG_GENMASK(23, 20)
#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK, val)
#define PORT_ALPM_CTL_MAX_PHY_SWING_HOLD_MASK REG_GENMASK(19, 16)
#define PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_HOLD_MASK, val)
#define PORT_ALPM_CTL_SILENCE_PERIOD_MASK REG_GENMASK(7, 0)
#define PORT_ALPM_CTL_SILENCE_PERIOD(val) REG_FIELD_PREP(PORT_ALPM_CTL_SILENCE_PERIOD_MASK, val)
#define _PORT_ALPM_LFPS_CTL_A 0x16fa30
#define PORT_ALPM_LFPS_CTL(tran) _MMIO_TRANS2(tran, _PORT_ALPM_LFPS_CTL_A)
#define PORT_ALPM_LFPS_CTL_LFPS_START_POLARITY REG_BIT(31)
#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MASK REG_GENMASK(27, 24)
#endif /* __INTEL_PSR_REGS_H__ */
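The ALPM field macros above follow the usual REG_GENMASK()/REG_FIELD_PREP() pattern, with ALPM_CTL_EXTENDED_FAST_WAKE_TIME() additionally biasing the programmed value by the 5-line minimum. A stand-alone rendering of that encoding, using plain bit operations rather than the kernel macros:

#include <stdint.h>
#include <stdio.h>

#define FAST_WAKE_MASK		(0x3fu << 8)	/* REG_GENMASK(13, 8) */
#define FAST_WAKE_MIN_LINES	5
#define FAST_WAKE_TIME(lines)	\
	((((lines) - FAST_WAKE_MIN_LINES) << 8) & FAST_WAKE_MASK)

int main(void)
{
	uint32_t val = FAST_WAKE_TIME(12);	/* 12 lines -> field value 7 */

	printf("ALPM_CTL[13:8] = %#x\n", val);	/* prints 0x700 */
	return 0;
}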

View file

@ -44,6 +44,7 @@
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
@ -2140,6 +2141,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (!intel_display_device_enabled(i915))
return connector_status_disconnected;
if (!intel_display_driver_check_access(i915))
return connector->status;
if (!intel_sdvo_set_target_output(intel_sdvo,
intel_sdvo_connector->output_flag))
return connector_status_unknown;
@ -2805,6 +2809,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type)
} else {
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
}
intel_connector->base.polled = intel_connector->polled;
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
@ -2880,6 +2885,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, u16 type)
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
intel_connector->base.polled = intel_connector->polled;
encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA;

View file

@ -122,6 +122,15 @@ bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}
bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
struct intel_tc_port *tc = to_tc_port(dig_port);
return intel_phy_is_tc(i915, phy) && !tc->legacy_port;
}
/*
* The display power domains used for TC ports depending on the
* platform and TC mode (legacy, DP-alt, TBT):
@ -986,10 +995,11 @@ xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
{
struct drm_i915_private *i915 = tc_to_i915(tc);
enum port port = tc->dig_port->base.port;
i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
assert_tc_cold_blocked(tc);
return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TCSS_POWER_STATE;
return intel_de_read(i915, reg) & XELPDP_TCSS_POWER_STATE;
}
static bool
@ -1012,16 +1022,17 @@ static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool ena
{
struct drm_i915_private *i915 = tc_to_i915(tc);
enum port port = tc->dig_port->base.port;
i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
u32 val;
assert_tc_cold_blocked(tc);
val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
val = intel_de_read(i915, reg);
if (enable)
val |= XELPDP_TCSS_POWER_REQUEST;
else
val &= ~XELPDP_TCSS_POWER_REQUEST;
intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
intel_de_write(i915, reg, val);
}
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
@ -1055,26 +1066,28 @@ static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
{
struct drm_i915_private *i915 = tc_to_i915(tc);
enum port port = tc->dig_port->base.port;
i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
u32 val;
assert_tc_cold_blocked(tc);
val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
val = intel_de_read(i915, reg);
if (take)
val |= XELPDP_TC_PHY_OWNERSHIP;
else
val &= ~XELPDP_TC_PHY_OWNERSHIP;
intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
intel_de_write(i915, reg, val);
}
static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
{
struct drm_i915_private *i915 = tc_to_i915(tc);
enum port port = tc->dig_port->base.port;
i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
assert_tc_cold_blocked(tc);
return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TC_PHY_OWNERSHIP;
return intel_de_read(i915, reg) & XELPDP_TC_PHY_OWNERSHIP;
}
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
@ -1590,7 +1603,7 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
* connected ports are usable, and avoids exposing objects that the
* users can't really use.
*/
bool intel_tc_port_connected_locked(struct intel_encoder *encoder)
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@ -1605,19 +1618,6 @@ bool intel_tc_port_connected_locked(struct intel_encoder *encoder)
return tc_phy_hpd_live_status(tc) & mask;
}
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_tc_port *tc = to_tc_port(dig_port);
bool is_connected;
mutex_lock(&tc->lock);
is_connected = intel_tc_port_connected_locked(encoder);
mutex_unlock(&tc->lock);
return is_connected;
}
static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
{
bool ret;

View file

@ -15,9 +15,9 @@ struct intel_encoder;
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port);
bool intel_tc_port_connected(struct intel_encoder *encoder);
bool intel_tc_port_connected_locked(struct intel_encoder *encoder);
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port);

View file

@ -40,6 +40,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
@ -1327,7 +1328,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
* the active portion. Hence following this formula seems
* more trouble than it's worth.
*
* if (GRAPHICS_VER(dev_priv) == 4) {
* if (DISPLAY_VER(dev_priv) == 4) {
* num = cdclk * (tv_mode->oversample >> !tv_mode->progressive);
* den = tv_mode->clock;
* } else {
@ -1723,6 +1724,9 @@ intel_tv_detect(struct drm_connector *connector,
if (!intel_display_device_enabled(i915))
return connector_status_disconnected;
if (!intel_display_driver_check_access(i915))
return connector->status;
if (force) {
struct drm_atomic_state *state;
@ -1990,6 +1994,7 @@ intel_tv_init(struct drm_i915_private *dev_priv)
* More recent chipsets favour HDMI rather than integrated S-Video.
*/
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
intel_connector->base.polled = intel_connector->polled;
drm_connector_init(&dev_priv->drm, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);

View file

@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_vblank.h"
@ -581,3 +582,132 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
intel_vblank_section_exit(i915);
spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags);
}
static int intel_mode_vblank_start(const struct drm_display_mode *mode)
{
int vblank_start = mode->crtc_vblank_start;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vblank_start = DIV_ROUND_UP(vblank_start, 2);
return vblank_start;
}
void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state,
struct intel_vblank_evade_ctx *evade)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_crtc_state *crtc_state;
const struct drm_display_mode *adjusted_mode;
evade->crtc = crtc;
evade->need_vlv_dsi_wa = (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
/*
* During fastsets/etc. the transcoder is still
* running with the old timings at this point.
*
* TODO: maybe just use the active timings here?
*/
if (intel_crtc_needs_modeset(new_crtc_state))
crtc_state = new_crtc_state;
else
crtc_state = old_crtc_state;
adjusted_mode = &crtc_state->hw.adjusted_mode;
if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
/* timing changes should happen with VRR disabled */
drm_WARN_ON(crtc->base.dev, intel_crtc_needs_modeset(new_crtc_state) ||
new_crtc_state->update_m_n || new_crtc_state->update_lrr);
if (intel_vrr_is_push_sent(crtc_state))
evade->vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
else
evade->vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
} else {
evade->vblank_start = intel_mode_vblank_start(adjusted_mode);
}
/* FIXME needs to be calibrated sensibly */
evade->min = evade->vblank_start - intel_usecs_to_scanlines(adjusted_mode,
VBLANK_EVASION_TIME_US);
evade->max = evade->vblank_start - 1;
/*
* M/N and TRANS_VTOTAL are double buffered on the transcoder's
* undelayed vblank, so with seamless M/N and LRR we must evade
* both vblanks.
*
* DSB execution waits for the transcoder's undelayed vblank,
* hence we must kick off the commit before that.
*/
if (new_crtc_state->dsb || new_crtc_state->update_m_n || new_crtc_state->update_lrr)
evade->min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
}
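A back-of-the-envelope example of the evasion window computed above, assuming VBLANK_EVASION_TIME_US is 100 and a 1080p60 mode (148500 kHz pixel clock, htotal 2200, vblank_start 1084):

#include <stdio.h>

int main(void)
{
	int clock_khz = 148500, htotal = 2200, vblank_start = 1084, usecs = 100;

	/* round-up usecs -> scanlines, as intel_usecs_to_scanlines() would */
	int lines = (usecs * clock_khz + 1000 * htotal - 1) / (1000 * htotal);
	int min = vblank_start - lines, max = vblank_start - 1;

	printf("evade window: scanlines %d..%d\n", min, max); /* 1077..1083 */
	return 0;
}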
/* must be called with vblank interrupt already enabled! */
int intel_vblank_evade(struct intel_vblank_evade_ctx *evade)
{
struct intel_crtc *crtc = evade->crtc;
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
long timeout = msecs_to_jiffies_timeout(1);
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
DEFINE_WAIT(wait);
int scanline;
if (evade->min <= 0 || evade->max <= 0)
return 0;
for (;;) {
/*
* prepare_to_wait() has a memory barrier, which guarantees
* other CPUs can see the task state update by the time we
* read the scanline.
*/
prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
scanline = intel_get_crtc_scanline(crtc);
if (scanline < evade->min || scanline > evade->max)
break;
if (!timeout) {
drm_err(&i915->drm,
"Potential atomic update failure on pipe %c\n",
pipe_name(crtc->pipe));
break;
}
local_irq_enable();
timeout = schedule_timeout(timeout);
local_irq_disable();
}
finish_wait(wq, &wait);
/*
* On VLV/CHV DSI the scanline counter would appear to
* increment approx. 1/3 of a scanline before start of vblank.
* The registers still get latched at start of vblank however.
* This means we must not write any registers on the first
* line of vblank (since not the whole line is actually in
* vblank). And unfortunately we can't use the interrupt to
* wait here since it will fire too soon. We could use the
* frame start interrupt instead since it will fire after the
* critical scanline, but that would require more changes
* in the interrupt code. So for now we'll just do the nasty
* thing and poll for the bad scanline to pass us by.
*
* FIXME figure out if BXT+ DSI suffers from this as well
*/
while (evade->need_vlv_dsi_wa && scanline == evade->vblank_start)
scanline = intel_get_crtc_scanline(crtc);
return scanline;
}
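A sketch of how a commit path would drive this context API, with stubs standing in for intel_vblank_evade_init()/intel_vblank_evade() and the kernel's irq and vblank primitives; the real callers live in the crtc commit code:

#include <stdio.h>

struct evade_ctx { int min, max, vblank_start; };

static void evade_init(struct evade_ctx *e)	/* stand-in for the init call */
{
	e->vblank_start = 1084;
	e->min = 1077;
	e->max = 1083;
}

static int evade(const struct evade_ctx *e)	/* stand-in for the evade call */
{
	int scanline = 1080;	/* pretend we read this from the hardware */

	while (scanline >= e->min && scanline <= e->max)
		scanline++;	/* spin until we are out of the window */

	return scanline;
}

int main(void)
{
	struct evade_ctx ctx;

	evade_init(&ctx);
	/* irqs would be disabled around this in the real commit path */
	printf("update armed at scanline %d\n", evade(&ctx));
	/* program the double-buffered registers, then re-enable irqs */
	return 0;
}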

View file

@ -13,6 +13,18 @@ struct drm_crtc;
struct intel_crtc;
struct intel_crtc_state;
struct intel_vblank_evade_ctx {
struct intel_crtc *crtc;
int min, max, vblank_start;
bool need_vlv_dsi_wa;
};
void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state,
struct intel_vblank_evade_ctx *evade);
/* must be called with vblank interrupt already enabled! */
int intel_vblank_evade(struct intel_vblank_evade_ctx *evade);
u32 i915_get_vblank_counter(struct drm_crtc *crtc);
u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,

View file

@ -2624,3 +2624,31 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
error:
kfree(intel_fb);
}
bool skl_fixup_initial_plane_config(struct intel_crtc *crtc,
const struct intel_initial_plane_config *plane_config)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
enum plane_id plane_id = plane->id;
enum pipe pipe = crtc->pipe;
u32 base;
if (!plane_state->uapi.visible)
return false;
base = intel_plane_ggtt_offset(plane_state);
/*
* We may have moved the surface to a different
* part of ggtt, make the plane aware of that.
*/
if (plane_config->base == base)
return false;
intel_de_write(i915, PLANE_SURF(pipe, plane_id), base);
return true;
}

View file

@ -22,6 +22,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
void skl_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config);
bool skl_fixup_initial_plane_config(struct intel_crtc *crtc,
const struct intel_initial_plane_config *plane_config);
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);

View file

@ -443,12 +443,35 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc,
new_crtc_state, i) {
struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
new_bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(new_bw_state))
return PTR_ERR(new_bw_state);
old_bw_state = intel_atomic_get_old_bw_state(state);
/*
* We store use_sagv_wm in the crtc state rather than relying on
* that bw state since we have no convenient way to get at the
* latter from the plane commit hooks (especially in the legacy
* cursor case).
*
* drm_atomic_check_only() gets upset if we pull more crtcs
* into the state, so we have to calculate this based on the
* individual intel_crtc_can_enable_sagv() rather than
* the overall intel_can_enable_sagv(). Otherwise the
* crtcs not included in the commit would not switch to the
* SAGV watermarks when we are about to enable SAGV, and that
* would lead to underruns. This does mean extra power draw
* when only a subset of the crtcs are blocking SAGV as the
* other crtcs can't be allowed to use the more optimal
* normal (ie. non-SAGV) watermarks.
*/
pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
DISPLAY_VER(i915) >= 12 &&
intel_crtc_can_enable_sagv(new_crtc_state);
if (intel_crtc_can_enable_sagv(new_crtc_state))
new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
else
@ -478,21 +501,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
return ret;
}
for_each_new_intel_crtc_in_state(state, crtc,
new_crtc_state, i) {
struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
/*
* We store use_sagv_wm in the crtc state rather than relying on
* that bw state since we have no convenient way to get at the
* latter from the plane commit hooks (especially in the legacy
* cursor case)
*/
pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
DISPLAY_VER(i915) >= 12 &&
intel_can_enable_sagv(i915, new_bw_state);
}
return 0;
}
@ -1367,7 +1375,7 @@ skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
u64 data_rate = 0;
for_each_plane_id_on_crtc(crtc, plane_id) {
if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20)
if (plane_id == PLANE_CURSOR)
continue;
data_rate += crtc_state->rel_data_rate[plane_id];
@ -1514,12 +1522,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
return 0;
/* Allocate fixed number of blocks for cursor. */
if (DISPLAY_VER(i915) < 20) {
cursor_size = skl_cursor_allocation(crtc_state, num_active);
iter.size -= cursor_size;
skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
alloc->end - cursor_size, alloc->end);
}
cursor_size = skl_cursor_allocation(crtc_state, num_active);
iter.size -= cursor_size;
skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
alloc->end - cursor_size, alloc->end);
iter.data_rate = skl_total_relative_data_rate(crtc_state);
@ -1533,7 +1539,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20) {
if (plane_id == PLANE_CURSOR) {
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
@ -1581,7 +1587,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20)
if (plane_id == PLANE_CURSOR)
continue;
if (DISPLAY_VER(i915) < 11 &&

View file

@ -129,7 +129,7 @@ i915_gem_object_create_region_at(struct intel_memory_region *mem,
return ERR_PTR(-EINVAL);
if (!(flags & I915_BO_ALLOC_GPU_ONLY) &&
offset + size > mem->io_size &&
offset + size > resource_size(&mem->io) &&
!i915_ggtt_has_aperture(to_gt(mem->i915)->ggtt))
return ERR_PTR(-ENOSPC);
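The io_size to struct resource conversion in this series leans on two helpers: DEFINE_RES_MEM(start, size), which stores an inclusive [start, end] range, and resource_size(), which recovers the size as end - start + 1. A stand-alone sketch of that arithmetic with local stand-ins for the kernel types:

#include <stdint.h>
#include <stdio.h>

struct res { uint64_t start, end; };

#define DEF_RES_MEM(s, sz)	{ .start = (s), .end = (s) + (sz) - 1 }

static uint64_t res_size(const struct res *r)
{
	return r->end - r->start + 1;	/* inclusive range */
}

int main(void)
{
	struct res io = DEF_RES_MEM(0x180000000ull, 256ull << 20);

	printf("io: [%#llx-%#llx], %llu MiB\n",
	       (unsigned long long)io.start, (unsigned long long)io.end,
	       (unsigned long long)(res_size(&io) >> 20));
	return 0;
}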

View file

@ -541,7 +541,9 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
/* Exclude the reserved region from driver use */
mem->region.end = i915->dsm.reserved.start - 1;
mem->io_size = min(mem->io_size, resource_size(&mem->region));
mem->io = DEFINE_RES_MEM(mem->io.start,
min(resource_size(&mem->io),
resource_size(&mem->region)));
i915->dsm.usable_size = resource_size(&mem->region);
@ -752,7 +754,7 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
* With discrete devices, where we lack a mappable aperture there is no
* possible way to ever access this memory on the CPU side.
*/
if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size &&
if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !resource_size(&mem->io) &&
!(flags & I915_BO_ALLOC_GPU_ONLY))
return -ENOSPC;
@ -826,7 +828,6 @@ static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
static int init_stolen_lmem(struct intel_memory_region *mem)
{
struct drm_i915_private *i915 = mem->i915;
int err;
if (GEM_WARN_ON(resource_size(&mem->region) == 0))
@ -838,14 +839,10 @@ static int init_stolen_lmem(struct intel_memory_region *mem)
return 0;
}
if (mem->io_size &&
!io_mapping_init_wc(&mem->iomap, mem->io_start, mem->io_size))
if (resource_size(&mem->io) &&
!io_mapping_init_wc(&mem->iomap, mem->io.start, resource_size(&mem->io)))
goto err_cleanup;
drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
&mem->io_start);
drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &mem->region.start);
return 0;
err_cleanup:
@ -855,7 +852,7 @@ static int init_stolen_lmem(struct intel_memory_region *mem)
static int release_stolen_lmem(struct intel_memory_region *mem)
{
if (mem->io_size)
if (resource_size(&mem->io))
io_mapping_fini(&mem->iomap);
i915_gem_cleanup_stolen(mem->i915);
return 0;
@ -938,13 +935,17 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
GEM_BUG_ON((dsm_base + dsm_size) > lmem_size);
} else {
/* Use DSM base address instead for stolen memory */
dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK;
dsm_base = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
if (WARN_ON(lmem_size < dsm_base))
return ERR_PTR(-ENODEV);
dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
}
if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
if (i915_direct_stolen_access(i915)) {
drm_dbg(&i915->drm, "Using direct DSM access\n");
io_start = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
io_size = dsm_size;
} else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
io_start = 0;
io_size = 0;
} else {

View file

@ -142,13 +142,13 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr,
place->fpfn = offset >> PAGE_SHIFT;
WARN_ON(overflows_type(place->fpfn + (size >> PAGE_SHIFT), place->lpfn));
place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
} else if (mr->io_size && mr->io_size < mr->total) {
} else if (resource_size(&mr->io) && resource_size(&mr->io) < mr->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place->flags |= TTM_PL_FLAG_TOPDOWN;
} else {
place->fpfn = 0;
WARN_ON(overflows_type(mr->io_size >> PAGE_SHIFT, place->lpfn));
place->lpfn = mr->io_size >> PAGE_SHIFT;
WARN_ON(overflows_type(resource_size(&mr->io) >> PAGE_SHIFT, place->lpfn));
place->lpfn = resource_size(&mr->io) >> PAGE_SHIFT;
}
}
}
@ -1083,7 +1083,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
struct intel_memory_region *mr = obj->mm.placements[i];
unsigned int flags;
if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM)
if (!resource_size(&mr->io) && mr->type != INTEL_MEMORY_SYSTEM)
continue;
flags = obj->flags;
@ -1094,8 +1094,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
}
if (err) {
drm_dbg(dev, "Unable to make resource CPU accessible(err = %pe)\n",
ERR_PTR(err));
drm_dbg_ratelimited(dev,
"Unable to make resource CPU accessible(err = %pe)\n",
ERR_PTR(err));
dma_resv_unlock(bo->base.resv);
ret = VM_FAULT_SIGBUS;
goto out_rpm;

View file

@ -1054,7 +1054,7 @@ static int igt_fill_mappable(struct intel_memory_region *mr,
int err;
total = 0;
size = mr->io_size;
size = resource_size(&mr->io);
do {
struct drm_i915_gem_object *obj;
@ -1315,28 +1315,28 @@ static int igt_mmap_migrate(void *arg)
struct intel_memory_region *mixed[] = { mr, system };
struct intel_memory_region *single[] = { mr };
struct ttm_resource_manager *man = mr->region_private;
resource_size_t saved_io_size;
struct resource saved_io;
int err;
if (mr->private)
continue;
if (!mr->io_size)
if (!resource_size(&mr->io))
continue;
/*
* For testing purposes let's force small BAR, if not already
* present.
*/
saved_io_size = mr->io_size;
if (mr->io_size == mr->total) {
resource_size_t io_size = mr->io_size;
saved_io = mr->io;
if (resource_size(&mr->io) == mr->total) {
resource_size_t io_size = resource_size(&mr->io);
io_size = rounddown_pow_of_two(io_size >> 1);
if (io_size < PAGE_SIZE)
continue;
mr->io_size = io_size;
mr->io = DEFINE_RES_MEM(mr->io.start, io_size);
i915_ttm_buddy_man_force_visible_size(man,
io_size >> PAGE_SHIFT);
}
@ -1396,9 +1396,9 @@ static int igt_mmap_migrate(void *arg)
IGT_MMAP_MIGRATE_FAIL_GPU |
IGT_MMAP_MIGRATE_UNFAULTABLE);
out_io_size:
mr->io_size = saved_io_size;
mr->io = saved_io;
i915_ttm_buddy_man_force_visible_size(man,
mr->io_size >> PAGE_SHIFT);
resource_size(&mr->io) >> PAGE_SHIFT);
if (err)
return err;
}

View file

@ -24,6 +24,7 @@
#include "intel_ring.h"
#include "i915_drv.h"
#include "i915_pci.h"
#include "i915_reg.h"
#include "i915_request.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"
@ -1152,13 +1153,20 @@ static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
phys_addr_t phys_addr;
u32 pte_flags;
int ret;
GEM_WARN_ON(pci_resource_len(pdev, GEN4_GTTMMADR_BAR) != gen6_gttmmadr_size(i915));
phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
if (i915_direct_stolen_access(i915)) {
drm_dbg(&i915->drm, "Using direct GSM access\n");
phys_addr = intel_uncore_read64(uncore, GEN6_GSMBASE) & GEN11_BDSM_MASK;
} else {
phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
}
if (needs_wc_ggtt_mapping(i915))
ggtt->gsm = ioremap_wc(phys_addr, size);

View file

@ -24,7 +24,8 @@
bool i915_ggtt_require_binder(struct drm_i915_private *i915)
{
/* Wa_13010847436 & Wa_14019519902 */
return MEDIA_VER_FULL(i915) == IP_VER(13, 0);
return !i915_direct_stolen_access(i915) &&
MEDIA_VER_FULL(i915) == IP_VER(13, 0);
}
static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)

View file

@ -144,8 +144,8 @@ region_lmem_init(struct intel_memory_region *mem)
int ret;
if (!io_mapping_init_wc(&mem->iomap,
mem->io_start,
mem->io_size))
mem->io.start,
resource_size(&mem->io)))
return -EIO;
ret = intel_region_ttm_init(mem);
@ -240,7 +240,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
lmem_size -= tile_stolen;
} else {
/* Stolen starts from GSMBASE without CCS */
lmem_size = intel_uncore_read64(&i915->uncore, GEN12_GSMBASE);
lmem_size = intel_uncore_read64(&i915->uncore, GEN6_GSMBASE);
}
i915_resize_lmem_bar(i915, lmem_size);
@ -273,14 +273,6 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
if (err)
goto err_region_put;
drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
&mem->io_start);
drm_info(&i915->drm, "Local memory IO size: %pa\n",
&mem->io_size);
drm_info(&i915->drm, "Local memory available: %pa\n",
&lmem_size);
if (io_size < lmem_size)
drm_info(&i915->drm, "Using a reduced BAR size of %lluMiB. Consider enabling 'Resizable BAR' or similar, if available in the BIOS.\n",
(u64)io_size >> 20);

View file

@ -206,8 +206,8 @@ static struct drm_i915_gem_object *create_lmem(struct intel_gt *gt)
* of pages. To succeed with both allocations, especially in the case of
* Small BAR, try to allocate no more than a quarter of mappable memory.
*/
if (mr && size > mr->io_size / 4)
size = mr->io_size / 4;
if (mr && size > resource_size(&mr->io) / 4)
size = resource_size(&mr->io) / 4;
return i915_gem_object_create_lmem(gt->i915, size, I915_BO_ALLOC_CONTIGUOUS);
}

View file

@ -574,7 +574,7 @@ int intel_gvt_set_opregion(struct intel_vgpu *vgpu)
ret = intel_vgpu_register_reg(vgpu,
PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
&intel_vgpu_regops_opregion, OPREGION_SIZE,
&intel_vgpu_regops_opregion, INTEL_GVT_OPREGION_SIZE,
VFIO_REGION_INFO_FLAG_READ, base);
return ret;

View file

@ -1003,8 +1003,10 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_runtime_pm_disable(&i915->runtime_pm);
intel_power_domains_disable(i915);
intel_fbdev_set_suspend(&i915->drm, FBINFO_STATE_SUSPENDED, true);
if (HAS_DISPLAY(i915)) {
drm_kms_helper_poll_disable(&i915->drm);
intel_display_driver_disable_user_access(i915);
drm_atomic_helper_shutdown(&i915->drm);
}
@ -1014,6 +1016,9 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_runtime_pm_disable_interrupts(i915);
intel_hpd_cancel_work(i915);
if (HAS_DISPLAY(i915))
intel_display_driver_suspend_access(i915);
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
@ -1080,8 +1085,11 @@ static int i915_drm_suspend(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
intel_power_domains_disable(dev_priv);
if (HAS_DISPLAY(dev_priv))
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
if (HAS_DISPLAY(dev_priv)) {
drm_kms_helper_poll_disable(dev);
intel_display_driver_disable_user_access(dev_priv);
}
pci_save_state(pdev);
@ -1092,6 +1100,9 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_runtime_pm_disable_interrupts(dev_priv);
intel_hpd_cancel_work(dev_priv);
if (HAS_DISPLAY(dev_priv))
intel_display_driver_suspend_access(dev_priv);
intel_suspend_encoders(dev_priv);
/* Must be called before GGTT is suspended. */
@ -1103,8 +1114,6 @@ static int i915_drm_suspend(struct drm_device *dev)
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_suspend(dev_priv, opregion_target_state);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
dev_priv->suspend_count++;
intel_dmc_suspend(dev_priv);
@ -1243,15 +1252,21 @@ static int i915_drm_resume(struct drm_device *dev)
intel_display_driver_init_hw(dev_priv);
intel_clock_gating_init(dev_priv);
if (HAS_DISPLAY(dev_priv))
intel_display_driver_resume_access(dev_priv);
intel_hpd_init(dev_priv);
/* MST sideband requires HPD interrupts enabled */
intel_dp_mst_resume(dev_priv);
intel_display_driver_resume(dev_priv);
intel_hpd_poll_disable(dev_priv);
if (HAS_DISPLAY(dev_priv))
if (HAS_DISPLAY(dev_priv)) {
intel_display_driver_enable_user_access(dev_priv);
drm_kms_helper_poll_enable(dev);
}
intel_hpd_poll_disable(dev_priv);
intel_opregion_resume(dev_priv);

View file

@ -1157,7 +1157,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
dma_addr_t offset = dma - mem->region.start;
void __iomem *s;
if (offset + PAGE_SIZE > mem->io_size) {
if (offset + PAGE_SIZE > resource_size(&mem->io)) {
ret = -EINVAL;
break;
}

View file

@ -502,7 +502,7 @@ static int query_memregion_info(struct drm_i915_private *i915,
info.probed_size = mr->total;
if (mr->type == INTEL_MEMORY_LOCAL)
info.probed_cpu_visible_size = mr->io_size;
info.probed_cpu_visible_size = resource_size(&mr->io);
else
info.probed_cpu_visible_size = mr->total;

View file

@ -3059,6 +3059,7 @@
#define MCURSOR_MODE_64_ARGB_AX (0x20 | MCURSOR_MODE_64_32B_AX)
#define _CURABASE 0x70084
#define _CURAPOS 0x70088
#define _CURAPOS_ERLY_TPT 0x7008c
#define CURSOR_POS_Y_SIGN REG_BIT(31)
#define CURSOR_POS_Y_MASK REG_GENMASK(30, 16)
#define CURSOR_POS_Y(y) REG_FIELD_PREP(CURSOR_POS_Y_MASK, (y))
@ -3087,6 +3088,7 @@
#define CURCNTR(pipe) _MMIO_CURSOR2(pipe, _CURACNTR)
#define CURBASE(pipe) _MMIO_CURSOR2(pipe, _CURABASE)
#define CURPOS(pipe) _MMIO_CURSOR2(pipe, _CURAPOS)
#define CURPOS_ERLY_TPT(pipe) _MMIO_CURSOR2(pipe, _CURAPOS_ERLY_TPT)
#define CURSIZE(pipe) _MMIO_CURSOR2(pipe, _CURASIZE)
#define CUR_FBC_CTL(pipe) _MMIO_CURSOR2(pipe, _CUR_FBC_CTL_A)
#define CUR_CHICKEN(pipe) _MMIO_CURSOR2(pipe, _CUR_CHICKEN_A)
@ -5412,6 +5414,9 @@
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
#define GEN6_PCODE_DATA1 _MMIO(0x13812C)
#define MTL_PCODE_STOLEN_ACCESS _MMIO(0x138914)
#define STOLEN_ACCESS_ALLOWED 0x1
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
#define GEN7_L3CDERRST1_ROW_MASK (0x7ff << 14)
@ -5652,6 +5657,10 @@ enum skl_power_gate {
#define DP_TP_CTL_MODE_SST (0 << 27)
#define DP_TP_CTL_MODE_MST (1 << 27)
#define DP_TP_CTL_FORCE_ACT (1 << 25)
#define DP_TP_CTL_TRAIN_PAT4_SEL_MASK (3 << 19)
#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4A (0 << 19)
#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4B (1 << 19)
#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4C (2 << 19)
#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1 << 18)
#define DP_TP_CTL_FDI_AUTOTRAIN (1 << 15)
#define DP_TP_CTL_LINK_TRAIN_MASK (7 << 8)
@ -5684,6 +5693,8 @@ enum skl_power_gate {
/* Known as DDI_CTL_DE in MTL+ */
#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE (1 << 31)
#define XE2LPD_DDI_BUF_D2D_LINK_ENABLE REG_BIT(29)
#define XE2LPD_DDI_BUF_D2D_LINK_STATE REG_BIT(28)
#define DDI_BUF_TRANS_SELECT(n) ((n) << 24)
#define DDI_BUF_EMP_MASK (0xf << 24)
#define DDI_BUF_PHY_LINK_RATE(r) ((r) << 20)
@ -6314,9 +6325,10 @@ enum skl_power_gate {
#define GMS_MASK REG_GENMASK(15, 8)
#define GGMS_MASK REG_GENMASK(7, 6)
#define GEN12_GSMBASE _MMIO(0x108100)
#define GEN12_DSMBASE _MMIO(0x1080C0)
#define GEN12_BDSM_MASK REG_GENMASK64(63, 20)
#define GEN6_GSMBASE _MMIO(0x108100)
#define GEN6_DSMBASE _MMIO(0x1080C0)
#define GEN6_BDSM_MASK REG_GENMASK64(31, 20)
#define GEN11_BDSM_MASK REG_GENMASK64(63, 20)
#define XEHP_CLOCK_GATE_DIS _MMIO(0x101014)
#define SGSI_SIDECLK_DIS REG_BIT(17)
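The GEN12 names give way to a shared GEN6 base with per-generation BDSM masks; a sketch of how a caller might combine them (the version cutoff shown is an assumption, not taken from this diff):

/* Sketch: recover the data stolen memory (DSM) base address. */
static u64 dsm_base_sketch(struct intel_uncore *uncore, int graphics_ver)
{
        u64 mask = graphics_ver >= 11 ? GEN11_BDSM_MASK : GEN6_BDSM_MASK;

        return intel_uncore_read64(uncore, GEN6_DSMBASE) & mask;
}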


@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
@@ -125,3 +126,19 @@ bool i915_vtd_active(struct drm_i915_private *i915)
/* Running as a guest, we assume the host is enforcing VT'd */
return i915_run_as_guest();
}
bool i915_direct_stolen_access(struct drm_i915_private *i915)
{
/*
* Wa_22018444074
*
* Access via BAR can hang MTL, go directly to GSM/DSM,
* except for VM guests which won't have access to it.
*
* Normally this would not work but on MTL the system firmware
* should have relaxed the access permissions sufficiently.
* 0x138914==0x1 indicates that the firmware has done its job.
*/
return IS_METEORLAKE(i915) && !i915_run_as_guest() &&
intel_uncore_read(&i915->uncore, MTL_PCODE_STOLEN_ACCESS) == STOLEN_ACCESS_ALLOWED;
}
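A hedged sketch of a caller pairing this predicate with the registers above; the BAR-derived fallback value is hypothetical:

u64 base;

if (i915_direct_stolen_access(i915))
        /* Wa_22018444074: take the DSM base straight from the register. */
        base = intel_uncore_read64(&i915->uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
else
        base = lmem_bar_base; /* hypothetical, derived from the BAR */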


@@ -391,4 +391,6 @@ static inline bool i915_run_as_guest(void)
bool i915_vtd_active(struct drm_i915_private *i915);
bool i915_direct_stolen_access(struct drm_i915_private *i915);
#endif /* !__I915_UTILS_H */


@@ -50,7 +50,7 @@ static int __iopagetest(struct intel_memory_region *mem,
if (memchr_inv(result, value, sizeof(result))) {
dev_err(mem->i915->drm.dev,
"Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
&mem->region, &mem->io_start, &offset, caller,
&mem->region, &mem->io.start, &offset, caller,
value, result[0], result[1], result[2]);
return -EINVAL;
}
@@ -67,11 +67,11 @@ static int iopagetest(struct intel_memory_region *mem,
int err;
int i;
va = ioremap_wc(mem->io_start + offset, PAGE_SIZE);
va = ioremap_wc(mem->io.start + offset, PAGE_SIZE);
if (!va) {
dev_err(mem->i915->drm.dev,
"Failed to ioremap memory region [%pa + %pa] for %ps\n",
&mem->io_start, &offset, caller);
&mem->io.start, &offset, caller);
return -EFAULT;
}
@@ -102,10 +102,10 @@ static int iomemtest(struct intel_memory_region *mem,
resource_size_t last, page;
int err;
if (mem->io_size < PAGE_SIZE)
if (resource_size(&mem->io) < PAGE_SIZE)
return 0;
last = mem->io_size - PAGE_SIZE;
last = resource_size(&mem->io) - PAGE_SIZE;
/*
* Quick test to check read/write access to the iomap (backing store).
@@ -207,7 +207,7 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem,
struct drm_i915_private *i915 = mem->i915;
int err = 0;
if (!mem->io_start)
if (!mem->io.start)
return 0;
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
@@ -252,8 +252,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->i915 = i915;
mem->region = DEFINE_RES_MEM(start, size);
mem->io_start = io_start;
mem->io_size = io_size;
mem->io = DEFINE_RES_MEM(io_start, io_size);
mem->min_page_size = min_page_size;
mem->ops = ops;
mem->total = size;
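Folding io_start/io_size into a struct resource leans on two standard kernel helpers; a toy round-trip shows why resource_size() can replace the stored size throughout this series:

#include <linux/ioport.h>
#include <linux/sizes.h>

/* DEFINE_RES_MEM() stores [start, start + size - 1], so
 * resource_size(), i.e. end - start + 1, recovers the size. */
static void resource_roundtrip_demo(void)
{
        struct resource io = DEFINE_RES_MEM(0x4000, SZ_1M);

        WARN_ON(io.start != 0x4000);
        WARN_ON(resource_size(&io) != SZ_1M);
}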
@@ -373,6 +372,24 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
i915->mm.regions[i] = mem;
}
for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
struct intel_memory_region *mem = i915->mm.regions[i];
u64 region_size, io_size;
if (!mem)
continue;
region_size = resource_size(&mem->region) >> 20;
io_size = resource_size(&mem->io) >> 20;
if (resource_size(&mem->io))
drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: %llu MiB %pR\n",
mem->id, mem->name, region_size, &mem->region, io_size, &mem->io);
else
drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: n/a\n",
mem->id, mem->name, region_size, &mem->region);
}
return 0;
out_cleanup:


@@ -71,8 +71,7 @@ struct intel_memory_region {
struct io_mapping iomap;
struct resource region;
resource_size_t io_start;
resource_size_t io_size;
struct resource io;
resource_size_t min_page_size;
resource_size_t total;


@@ -87,7 +87,7 @@ int intel_region_ttm_init(struct intel_memory_region *mem)
ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
resource_size(&mem->region),
mem->io_size,
resource_size(&mem->io),
mem->min_page_size, PAGE_SIZE);
if (ret)
return ret;
@@ -219,16 +219,16 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
goto out;
}
place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
} else if (mem->io_size && mem->io_size < mem->total) {
} else if (resource_size(&mem->io) && resource_size(&mem->io) < mem->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place.flags |= TTM_PL_FLAG_TOPDOWN;
} else {
place.fpfn = 0;
if (WARN_ON(overflows_type(mem->io_size >> PAGE_SHIFT, place.lpfn))) {
if (WARN_ON(overflows_type(resource_size(&mem->io) >> PAGE_SHIFT, place.lpfn))) {
ret = -E2BIG;
goto out;
}
place.lpfn = mem->io_size >> PAGE_SHIFT;
place.lpfn = resource_size(&mem->io) >> PAGE_SHIFT;
}
}


@@ -544,8 +544,8 @@ static u64 igt_object_mappable_total(struct drm_i915_gem_object *obj)
u64 start = drm_buddy_block_offset(block);
u64 end = start + drm_buddy_block_size(mm, block);
if (start < mr->io_size)
total += min_t(u64, end, mr->io_size) - start;
if (start < resource_size(&mr->io))
total += min_t(u64, end, resource_size(&mr->io)) - start;
}
return total;


@@ -140,11 +140,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) &&
!IS_ALDERLAKE_P(dev_priv));
return PCH_ADP;
case INTEL_PCH_MTP_DEVICE_ID_TYPE:
case INTEL_PCH_MTP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Meteor Lake PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_METEORLAKE(dev_priv));
return PCH_MTP;
default:
return PCH_NONE;
}
@@ -173,9 +168,7 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
* make an educated guess as to which PCH is really there.
*/
if (IS_METEORLAKE(dev_priv))
id = INTEL_PCH_MTP_DEVICE_ID_TYPE;
else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
@@ -225,6 +218,13 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
if (DISPLAY_VER(dev_priv) >= 20) {
dev_priv->pch_type = PCH_LNL;
return;
} else if (IS_METEORLAKE(dev_priv)) {
/*
* Both north display and south display are on the SoC die.
* The real PCH is uninvolved in display.
*/
dev_priv->pch_type = PCH_MTL;
return;
} else if (IS_DG2(dev_priv)) {
dev_priv->pch_type = PCH_DG2;
return;


@@ -25,11 +25,11 @@ enum intel_pch {
PCH_ICP, /* Ice Lake/Jasper Lake PCH */
PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */
PCH_ADP, /* Alder Lake PCH */
PCH_MTP, /* Meteor Lake PCH */
/* Fake PCHs, functionality handled on the same PCI dev */
PCH_DG1 = 1024,
PCH_DG2,
PCH_MTL,
PCH_LNL,
};
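With PCH_MTP gone, Meteor Lake joins the fake PCHs, which deliberately start at 1024; a sketch of the kind of range check that ordering allows (the helper name is illustrative):

static bool intel_pch_is_fake_sketch(const struct drm_i915_private *i915)
{
        /* Fake PCHs sort at or above PCH_DG1 == 1024 by construction. */
        return INTEL_PCH_TYPE(i915) >= PCH_DG1;
}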
@@ -59,16 +59,12 @@ enum intel_pch {
#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
#define INTEL_PCH_MTP_DEVICE_ID_TYPE 0x7E00
#define INTEL_PCH_MTP2_DEVICE_ID_TYPE 0xAE00
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
#define HAS_PCH_LNL(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LNL)
#define HAS_PCH_MTP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MTP)
#define HAS_PCH_DG2(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG2)
#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)


@@ -10,6 +10,7 @@
#include "i915_drv.h"
#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_fb.h"
@@ -18,19 +19,20 @@
#include "intel_plane_initial.h"
static bool
intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
const struct intel_initial_plane_config *plane_config,
intel_reuse_initial_plane_obj(struct intel_crtc *this,
const struct intel_initial_plane_config plane_configs[],
struct drm_framebuffer **fb)
{
struct drm_i915_private *i915 = to_i915(this->base.dev);
struct intel_crtc *crtc;
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
if (!crtc_state->uapi.active)
continue;
@@ -38,7 +40,7 @@ intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
if (!plane_state->ggtt_vma)
continue;
if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
if (plane_configs[this->pipe].base == plane_configs[crtc->pipe].base) {
*fb = plane_state->hw.fb;
return true;
}
@@ -178,10 +180,10 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
static void
intel_find_initial_plane_obj(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
struct intel_initial_plane_config plane_configs[])
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_initial_plane_config *plane_config =
&plane_configs[crtc->pipe];
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
@@ -201,7 +203,7 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
if (intel_alloc_initial_plane_obj(crtc, plane_config))
fb = &plane_config->fb->base;
else if (!intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb))
else if (!intel_reuse_initial_plane_obj(crtc, plane_configs, &fb))
goto nofb;
plane_state->uapi.rotation = plane_config->rotation;
@@ -267,25 +269,36 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
}
}
void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
void intel_initial_plane_config(struct drm_i915_private *i915)
{
struct xe_device *xe = to_xe_device(crtc->base.dev);
struct intel_initial_plane_config plane_config = {};
struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
/*
* Note that reserving the BIOS fb up front prevents us
* from stuffing other stolen allocations like the ring
* on top. This prevents some ugliness at boot time, and
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
xe->display.funcs.display->get_initial_plane_config(crtc, &plane_config);
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_initial_plane_config *plane_config =
&plane_configs[crtc->pipe];
/*
* If the fb is shared between multiple heads, we'll
* just get the first one.
*/
intel_find_initial_plane_obj(crtc, &plane_config);
if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
continue;
plane_config_fini(&plane_config);
/*
* Note that reserving the BIOS fb up front prevents us
* from stuffing other stolen allocations like the ring
* on top. This prevents some ugliness at boot time, and
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
i915->display.funcs.display->get_initial_plane_config(crtc, plane_config);
/*
* If the fb is shared between multiple heads, we'll
* just get the first one.
*/
intel_find_initial_plane_obj(crtc, plane_configs);
if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config))
intel_crtc_wait_for_next_vblank(crtc);
plane_config_fini(plane_config);
}
}
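The loop above assumes each display backend supplies both hooks; a sketch of the expected vtable wiring, using the i9xx entry points as the example (the field names follow the calls above, the struct tag is assumed):

static const struct intel_display_funcs i9xx_display_funcs_sketch = {
        .get_initial_plane_config = i9xx_get_initial_plane_config,
        .fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};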


@@ -718,6 +718,7 @@
# define DP_PSR_SU_REGION_SCANLINE_CAPTURE BIT(4) /* eDP 1.4a */
# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS BIT(5) /* eDP 1.4a */
# define DP_PSR_ENABLE_PSR2 BIT(6) /* eDP 1.4a */
# define DP_PSR_ENABLE_SU_REGION_ET BIT(7) /* eDP 1.5 */
#define DP_ADAPTER_CTRL 0x1a0
# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
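A hedged sketch of how the new bit would be requested when enabling PSR2 with selective update early transport; DP_PSR_EN_CFG is the existing DPCD 0x170 configuration register, and sink capability checks are omitted:

u8 val = DP_PSR_ENABLE | DP_PSR_ENABLE_PSR2 | DP_PSR_ENABLE_SU_REGION_ET;

drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);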


@@ -602,6 +602,9 @@ void __drm_err(const char *format, ...);
drm_dev_printk(drm_ ? drm_->dev : NULL, KERN_DEBUG, fmt, ## __VA_ARGS__); \
})
#define drm_dbg_ratelimited(drm, fmt, ...) \
__DRM_DEFINE_DBG_RATELIMITED(DRIVER, drm, fmt, ## __VA_ARGS__)
#define drm_dbg_kms_ratelimited(drm, fmt, ...) \
__DRM_DEFINE_DBG_RATELIMITED(KMS, drm, fmt, ## __VA_ARGS__)
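Both macros expand through the __DRM_DEFINE_DBG_RATELIMITED() helper above; a sketch of the intended use in a hot path such as a fault handler (the handler and the vm_private_data layout are illustrative):

static vm_fault_t demo_fault(struct vm_fault *vmf)
{
        struct drm_device *dev = vmf->vma->vm_private_data; /* assumed */

        /* At most one debug line per ratelimit window, not one per fault. */
        drm_dbg_ratelimited(dev, "fault on evicted object\n");
        return VM_FAULT_SIGBUS;
}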


@@ -751,10 +751,13 @@
/* MTL */
#define INTEL_MTL_IDS(info) \
INTEL_VGA_DEVICE(0x7D40, info), \
INTEL_VGA_DEVICE(0x7D41, info), \
INTEL_VGA_DEVICE(0x7D45, info), \
INTEL_VGA_DEVICE(0x7D51, info), \
INTEL_VGA_DEVICE(0x7D55, info), \
INTEL_VGA_DEVICE(0x7D60, info), \
INTEL_VGA_DEVICE(0x7D67, info), \
INTEL_VGA_DEVICE(0x7DD1, info), \
INTEL_VGA_DEVICE(0x7DD5, info)
#endif /* _I915_PCIIDS_H */