2014-11-14 16:52:28 +00:00
|
|
|
/*
|
|
|
|
* Copyright © 2014 Intel Corporation
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
* Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
|
|
|
* DEALINGS IN THE SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
2019-04-05 11:00:09 +00:00
|
|
|
#include <drm/drm_atomic_helper.h>
|
2021-09-14 21:25:06 +00:00
|
|
|
#include <drm/drm_damage_helper.h>
|
2023-08-28 08:31:07 +00:00
|
|
|
#include <drm/drm_debugfs.h>
|
2019-04-05 11:00:09 +00:00
|
|
|
|
|
|
|
#include "i915_drv.h"
|
2022-11-09 15:35:22 +00:00
|
|
|
#include "i915_reg.h"
|
2019-10-31 11:25:59 +00:00
|
|
|
#include "intel_atomic.h"
|
2021-10-20 22:33:36 +00:00
|
|
|
#include "intel_crtc.h"
|
2023-11-01 11:42:08 +00:00
|
|
|
#include "intel_ddi.h"
|
2021-04-30 14:39:44 +00:00
|
|
|
#include "intel_de.h"
|
2019-08-06 11:39:33 +00:00
|
|
|
#include "intel_display_types.h"
|
2023-01-04 15:32:58 +00:00
|
|
|
#include "intel_dp.h"
|
2021-01-20 10:18:34 +00:00
|
|
|
#include "intel_dp_aux.h"
|
2023-08-30 08:51:27 +00:00
|
|
|
#include "intel_frontbuffer.h"
|
2021-01-20 10:18:34 +00:00
|
|
|
#include "intel_hdmi.h"
|
2019-04-05 11:00:09 +00:00
|
|
|
#include "intel_psr.h"
|
2023-03-31 09:09:49 +00:00
|
|
|
#include "intel_psr_regs.h"
|
2021-07-23 17:42:37 +00:00
|
|
|
#include "intel_snps_phy.h"
|
2021-02-05 14:48:36 +00:00
|
|
|
#include "skl_universal_plane.h"
|
2019-04-05 11:00:09 +00:00
|
|
|
|
2014-11-14 16:52:29 +00:00
|
|
|
/**
|
|
|
|
* DOC: Panel Self Refresh (PSR/SRD)
|
|
|
|
*
|
|
|
|
* Since Haswell Display controller supports Panel Self-Refresh on display
|
|
|
|
* panels which have a remote frame buffer (RFB) implemented according to PSR
|
|
|
|
* spec in eDP1.3. PSR feature allows the display to go to lower standby states
|
|
|
|
* when system is idle but display is on as it eliminates display refresh
|
|
|
|
* request to DDR memory completely as long as the frame buffer for that
|
|
|
|
* display is unchanged.
|
|
|
|
*
|
|
|
|
* Panel Self Refresh must be supported by both Hardware (source) and
|
|
|
|
* Panel (sink).
|
|
|
|
*
|
|
|
|
* PSR saves power by caching the framebuffer in the panel RFB, which allows us
|
|
|
|
* to power down the link and memory controller. For DSI panels the same idea
|
|
|
|
* is called "manual mode".
|
|
|
|
*
|
|
|
|
* The implementation uses the hardware-based PSR support which automatically
|
|
|
|
* enters/exits self-refresh mode. The hardware takes care of sending the
|
|
|
|
* required DP aux message and could even retrain the link (that part isn't
|
|
|
|
* enabled yet though). The hardware also keeps track of any frontbuffer
|
|
|
|
* changes to know when to exit self-refresh mode again. Unfortunately that
|
|
|
|
* part doesn't work too well, hence why the i915 PSR support uses the
|
|
|
|
* software frontbuffer tracking to make sure it doesn't miss a screen
|
|
|
|
* update. For this integration intel_psr_invalidate() and intel_psr_flush()
|
|
|
|
* get called by the frontbuffer tracking code. Note that because of locking
|
|
|
|
* issues the self-refresh re-enable code is done from a work queue, which
|
|
|
|
* must be correctly synchronized/cancelled when shutting down the pipe.
|
2020-02-05 21:49:45 +00:00
|
|
|
*
|
|
|
|
* DC3CO (DC3 clock off)
|
|
|
|
*
|
|
|
|
* On top of PSR2, GEN12 adds a intermediate power savings state that turns
|
|
|
|
* clock off automatically during PSR2 idle state.
|
|
|
|
* The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
|
|
|
|
* entry/exit allows the HW to enter a low-power state even when page flipping
|
|
|
|
* periodically (for instance a 30fps video playback scenario).
|
|
|
|
*
|
|
|
|
* Every time a flip occurs PSR2 will get out of deep sleep state (if it was),
|
|
|
|
* so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
|
|
|
|
* frames, if no other flip occurs and the function above is executed, DC3CO is
|
|
|
|
* disabled and PSR2 is configured to enter deep sleep, resetting again in case
|
|
|
|
* of another flip.
|
|
|
|
* Front buffer modifications do not trigger DC3CO activation on purpose as it
|
|
|
|
* would bring a lot of complexity and most of the modern systems will only
|
|
|
|
* use page flips.
|
2014-11-14 16:52:29 +00:00
|
|
|
*/
|
|
|
|
|
2023-04-11 19:14:26 +00:00
|
|
|
/*
|
|
|
|
* Description of PSR mask bits:
|
|
|
|
*
|
|
|
|
* EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
|
|
|
|
*
|
|
|
|
* When unmasked (nearly) all display register writes (eg. even
|
|
|
|
* SWF) trigger a PSR exit. Some registers are excluded from this
|
|
|
|
* and they have a more specific mask (described below). On icl+
|
|
|
|
* this bit no longer exists and is effectively always set.
|
|
|
|
*
|
|
|
|
* PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
|
|
|
|
*
|
|
|
|
* When unmasked (nearly) all pipe/plane register writes
|
|
|
|
* trigger a PSR exit. Some plane registers are excluded from this
|
|
|
|
* and they have a more specific mask (described below).
|
|
|
|
*
|
|
|
|
* CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
|
|
|
|
* PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
|
|
|
|
* EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
|
|
|
|
*
|
|
|
|
* When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
|
|
|
|
* SPR_SURF/CURBASE are not included in this and instead are
|
|
|
|
* controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
|
|
|
|
* EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
|
|
|
|
*
|
|
|
|
* PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
|
|
|
|
* EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
|
|
|
|
*
|
|
|
|
* When unmasked PSR is blocked as long as the sprite
|
|
|
|
* plane is enabled. skl+ with their universal planes no
|
|
|
|
* longer have a mask bit like this, and no plane being
|
|
|
|
* enabled blocks PSR.
|
|
|
|
*
|
|
|
|
* PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
|
|
|
|
* EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
|
|
|
|
*
|
|
|
|
* When unmasked CURPOS writes trigger a PSR exit. On skl+
|
|
|
|
* this doesn't exist but CURPOS is included in the
|
|
|
|
* PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
|
|
|
|
*
|
|
|
|
* PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
|
|
|
|
* EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
|
|
|
|
*
|
|
|
|
* When unmasked PSR is blocked as long as vblank and/or vsync
|
|
|
|
* interrupt is unmasked in IMR *and* enabled in IER.
|
|
|
|
*
|
|
|
|
* CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
|
|
|
|
* CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
|
|
|
|
*
|
|
|
|
* Selects whether PSR exit generates an extra vblank before
|
|
|
|
* the first frame is transmitted. Also note the opposite polarity
|
|
|
|
* of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
|
|
|
|
* unmasked==do not generate the extra vblank).
|
|
|
|
*
|
|
|
|
* With DC states enabled the extra vblank happens after link training,
|
|
|
|
* with DC states disabled it happens immediately upon PSR exit trigger.
|
|
|
|
* No idea as of now why there is a difference. HSW/BDW (which don't
|
|
|
|
* even have DMC) always generate it after link training. Go figure.
|
|
|
|
*
|
|
|
|
* Unfortunately CHICKEN_TRANS itself seems to be double buffered
|
|
|
|
* and thus won't latch until the first vblank. So with DC states
|
|
|
|
* enabled the register effectively uses the reset value during DC5
|
|
|
|
* exit+PSR exit sequence, and thus the bit does nothing until
|
|
|
|
* latched by the vblank that it was trying to prevent from being
|
|
|
|
* generated in the first place. So we should probably call this
|
|
|
|
* one a chicken/egg bit instead on skl+.
|
|
|
|
*
|
|
|
|
* In standby mode (as opposed to link-off) this makes no difference
|
|
|
|
* as the timing generator keeps running the whole time generating
|
|
|
|
* normal periodic vblanks.
|
|
|
|
*
|
|
|
|
* WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
|
|
|
|
* and doing so makes the behaviour match the skl+ reset value.
|
|
|
|
*
|
|
|
|
* CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
|
|
|
|
* CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
|
|
|
|
*
|
|
|
|
* On BDW without this bit no vblanks whatsoever are
|
|
|
|
* generated after PSR exit. On HSW this has no apparent effect.
|
|
|
|
* WaPsrDPRSUnmaskVBlankInSRD says to set this.
|
|
|
|
*
|
|
|
|
* The rest of the bits are more self-explanatory and/or
|
|
|
|
* irrelevant for normal operation.
|
|
|
|
*/
|
|
|
|
|
2023-11-08 07:23:00 +00:00
|
|
|
bool intel_encoder_can_psr(struct intel_encoder *encoder)
|
|
|
|
{
|
|
|
|
if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
|
|
|
|
return CAN_PSR(enc_to_intel_dp(encoder)) ||
|
|
|
|
CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
|
|
|
|
else
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
static bool psr_global_enabled(struct intel_dp *intel_dp)
|
2018-08-09 14:21:01 +00:00
|
|
|
{
|
2022-06-08 20:33:44 +00:00
|
|
|
struct intel_connector *connector = intel_dp->attached_connector;
|
2021-02-04 13:40:14 +00:00
|
|
|
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
|
|
|
|
|
|
|
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
|
2018-08-09 14:21:01 +00:00
|
|
|
case I915_PSR_DEBUG_DEFAULT:
|
2023-10-24 12:40:50 +00:00
|
|
|
if (i915->display.params.enable_psr == -1)
|
2022-06-08 20:33:44 +00:00
|
|
|
return connector->panel.vbt.psr.enable;
|
2023-10-24 12:40:50 +00:00
|
|
|
return i915->display.params.enable_psr;
|
2018-08-09 14:21:01 +00:00
|
|
|
case I915_PSR_DEBUG_DISABLE:
|
|
|
|
return false;
|
|
|
|
default:
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
static bool psr2_global_enabled(struct intel_dp *intel_dp)
|
2018-08-08 14:19:11 +00:00
|
|
|
{
|
2022-02-24 20:25:23 +00:00
|
|
|
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
|
2019-01-17 20:55:45 +00:00
|
|
|
case I915_PSR_DEBUG_DISABLE:
|
2018-08-08 14:19:11 +00:00
|
|
|
case I915_PSR_DEBUG_FORCE_PSR1:
|
|
|
|
return false;
|
|
|
|
default:
|
2023-10-24 12:40:50 +00:00
|
|
|
if (i915->display.params.enable_psr == 1)
|
2022-02-24 20:25:23 +00:00
|
|
|
return false;
|
2020-10-07 19:52:37 +00:00
|
|
|
return true;
|
2018-08-08 14:19:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-10-03 07:20:11 +00:00
|
|
|
static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
|
|
|
|
|
|
|
return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
|
|
|
|
EDP_PSR_ERROR(intel_dp->psr.transcoder);
|
|
|
|
}
|
|
|
|
|
|
|
|
static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
|
|
|
|
|
|
|
return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
|
|
|
|
EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
|
|
|
|
}
|
|
|
|
|
|
|
|
static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
|
|
|
|
|
|
|
return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
|
|
|
|
EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
|
|
|
|
}
|
|
|
|
|
|
|
|
static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
|
|
|
|
|
|
|
return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
|
|
|
|
EDP_PSR_MASK(intel_dp->psr.transcoder);
|
|
|
|
}
|
|
|
|
|
2023-06-09 14:13:54 +00:00
|
|
|
static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
|
|
|
|
enum transcoder cpu_transcoder)
|
2018-11-20 09:23:24 +00:00
|
|
|
{
|
2023-06-09 14:13:55 +00:00
|
|
|
if (DISPLAY_VER(dev_priv) >= 8)
|
|
|
|
return EDP_PSR_CTL(cpu_transcoder);
|
|
|
|
else
|
|
|
|
return HSW_SRD_CTL;
|
2023-06-09 14:13:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
|
|
|
|
enum transcoder cpu_transcoder)
|
|
|
|
{
|
2023-06-09 14:13:55 +00:00
|
|
|
if (DISPLAY_VER(dev_priv) >= 8)
|
|
|
|
return EDP_PSR_DEBUG(cpu_transcoder);
|
|
|
|
else
|
|
|
|
return HSW_SRD_DEBUG;
|
2023-06-09 14:13:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
|
|
|
|
enum transcoder cpu_transcoder)
|
|
|
|
{
|
2023-06-09 14:13:55 +00:00
|
|
|
if (DISPLAY_VER(dev_priv) >= 8)
|
|
|
|
return EDP_PSR_PERF_CNT(cpu_transcoder);
|
|
|
|
else
|
|
|
|
return HSW_SRD_PERF_CNT;
|
2023-06-09 14:13:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
|
|
|
|
enum transcoder cpu_transcoder)
|
|
|
|
{
|
2023-06-09 14:13:55 +00:00
|
|
|
if (DISPLAY_VER(dev_priv) >= 8)
|
|
|
|
return EDP_PSR_STATUS(cpu_transcoder);
|
|
|
|
else
|
|
|
|
return HSW_SRD_STATUS;
|
2023-06-09 14:13:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
|
|
|
|
enum transcoder cpu_transcoder)
|
|
|
|
{
|
|
|
|
if (DISPLAY_VER(dev_priv) >= 12)
|
|
|
|
return TRANS_PSR_IMR(cpu_transcoder);
|
|
|
|
else
|
|
|
|
return EDP_PSR_IMR;
|
|
|
|
}
|
2019-09-04 21:34:14 +00:00
|
|
|
|
2023-06-09 14:13:54 +00:00
|
|
|
static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
|
|
|
|
enum transcoder cpu_transcoder)
|
|
|
|
{
|
2022-10-03 07:20:11 +00:00
|
|
|
if (DISPLAY_VER(dev_priv) >= 12)
|
2023-06-09 14:13:54 +00:00
|
|
|
return TRANS_PSR_IIR(cpu_transcoder);
|
2022-10-03 07:20:11 +00:00
|
|
|
else
|
2023-06-09 14:13:54 +00:00
|
|
|
return EDP_PSR_IIR;
|
|
|
|
}
|
|
|
|
|
2023-06-09 14:13:56 +00:00
|
|
|
static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
|
|
|
|
enum transcoder cpu_transcoder)
|
|
|
|
{
|
|
|
|
if (DISPLAY_VER(dev_priv) >= 8)
|
|
|
|
return EDP_PSR_AUX_CTL(cpu_transcoder);
|
|
|
|
else
|
|
|
|
return HSW_SRD_AUX_CTL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
|
|
|
|
enum transcoder cpu_transcoder, int i)
|
|
|
|
{
|
|
|
|
if (DISPLAY_VER(dev_priv) >= 8)
|
|
|
|
return EDP_PSR_AUX_DATA(cpu_transcoder, i);
|
|
|
|
else
|
|
|
|
return HSW_SRD_AUX_DATA(i);
|
|
|
|
}
|
|
|
|
|
2023-06-09 14:13:54 +00:00
|
|
|
static void psr_irq_control(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
|
|
|
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
|
|
|
|
u32 mask;
|
2019-09-04 21:34:15 +00:00
|
|
|
|
2022-10-03 07:20:11 +00:00
|
|
|
mask = psr_irq_psr_error_bit_get(intel_dp);
|
2021-02-04 13:40:14 +00:00
|
|
|
if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
|
2022-10-03 07:20:11 +00:00
|
|
|
mask |= psr_irq_post_exit_bit_get(intel_dp) |
|
|
|
|
psr_irq_pre_entry_bit_get(intel_dp);
|
2019-09-04 21:34:14 +00:00
|
|
|
|
2023-06-09 14:13:54 +00:00
|
|
|
intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
|
|
|
|
psr_irq_mask_get(intel_dp), ~mask);
|
2018-04-05 01:37:17 +00:00
|
|
|
}
|
|
|
|
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
static void psr_event_print(struct drm_i915_private *i915,
|
|
|
|
u32 val, bool psr2_enabled)
|
2018-04-25 21:23:32 +00:00
|
|
|
{
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
|
2018-04-25 21:23:32 +00:00
|
|
|
if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
|
2018-04-25 21:23:32 +00:00
|
|
|
if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
|
2018-04-25 21:23:32 +00:00
|
|
|
if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
|
2018-04-25 21:23:32 +00:00
|
|
|
if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
|
2018-04-25 21:23:32 +00:00
|
|
|
if (val & PSR_EVENT_GRAPHICS_RESET)
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
|
2018-04-25 21:23:32 +00:00
|
|
|
if (val & PSR_EVENT_PCH_INTERRUPT)
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
|
2018-04-25 21:23:32 +00:00
|
|
|
if (val & PSR_EVENT_MEMORY_UP)
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "\tMemory up\n");
|
2018-04-25 21:23:32 +00:00
|
|
|
if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
|
2018-04-25 21:23:32 +00:00
|
|
|
if (val & PSR_EVENT_WD_TIMER_EXPIRE)
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
|
2018-04-25 21:23:32 +00:00
|
|
|
if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
|
2018-04-25 21:23:32 +00:00
|
|
|
if (val & PSR_EVENT_REGISTER_UPDATE)
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "\tRegister updated\n");
|
2018-04-25 21:23:32 +00:00
|
|
|
if (val & PSR_EVENT_HDCP_ENABLE)
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
|
2018-04-25 21:23:32 +00:00
|
|
|
if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
|
2018-04-25 21:23:32 +00:00
|
|
|
if (val & PSR_EVENT_VBI_ENABLE)
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
|
2018-04-25 21:23:32 +00:00
|
|
|
if (val & PSR_EVENT_LPSP_MODE_EXIT)
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
|
2018-04-25 21:23:32 +00:00
|
|
|
if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
|
2018-04-25 21:23:32 +00:00
|
|
|
}
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
/*
 * Handle the PSR interrupt bits latched in @psr_iir for @intel_dp's
 * transcoder: record entry-attempt/exit timestamps, dump the PSR_EVENT
 * register on gen9+, and on an AUX error mask the error interrupt and
 * kick the PSR work item so it can tear PSR down.
 */
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	ktime_t time_ns = ktime_get();

	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
		/* Hardware announced it will try to enter PSR shortly. */
		intel_dp->psr.last_entry_attempt = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
			    transcoder_name(cpu_transcoder));
	}

	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
		intel_dp->psr.last_exit = time_ns;

		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR exit completed\n",
			    transcoder_name(cpu_transcoder));

		if (DISPLAY_VER(dev_priv) >= 9) {
			u32 val;

			/* Read-to-clear the per-transcoder PSR event bits. */
			val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);

			psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
		}
	}

	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
			 transcoder_name(cpu_transcoder));

		intel_dp->psr.irq_aux_error = true;

		/*
		 * If this interruption is not masked it will keep
		 * interrupting so fast that it prevents the scheduled
		 * work to run.
		 * Also after a PSR error, we don't want to arm PSR
		 * again so we don't care about unmask the interruption
		 * or unset irq_aux_error.
		 */
		intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
			     0, psr_irq_psr_error_bit_get(intel_dp));

		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
	}
}
|
|
|
|
|
2018-02-23 22:15:17 +00:00
|
|
|
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
|
|
|
|
{
|
2019-01-16 09:15:19 +00:00
|
|
|
u8 alpm_caps = 0;
|
2018-02-23 22:15:17 +00:00
|
|
|
|
|
|
|
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
|
|
|
|
&alpm_caps) != 1)
|
|
|
|
return false;
|
|
|
|
return alpm_caps & DP_ALPM_CAP;
|
|
|
|
}
|
|
|
|
|
2018-03-28 22:30:44 +00:00
|
|
|
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
|
|
|
|
{
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
2018-05-11 19:51:44 +00:00
|
|
|
u8 val = 8; /* assume the worst if we can't read the value */
|
2018-03-28 22:30:44 +00:00
|
|
|
|
|
|
|
if (drm_dp_dpcd_readb(&intel_dp->aux,
|
|
|
|
DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
|
|
|
|
val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
|
|
|
|
else
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm,
|
|
|
|
"Unable to get sink synchronization latency, assuming 8 frames\n");
|
2018-03-28 22:30:44 +00:00
|
|
|
return val;
|
|
|
|
}
|
|
|
|
|
2021-06-16 20:31:53 +00:00
|
|
|
static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
|
2018-12-04 00:34:03 +00:00
|
|
|
{
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
2018-12-04 00:34:03 +00:00
|
|
|
ssize_t r;
|
2021-06-16 20:31:53 +00:00
|
|
|
u16 w;
|
|
|
|
u8 y;
|
|
|
|
|
|
|
|
/* If sink don't have specific granularity requirements set legacy ones */
|
|
|
|
if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
|
|
|
|
/* As PSR2 HW sends full lines, we do not care about x granularity */
|
|
|
|
w = 4;
|
|
|
|
y = 4;
|
|
|
|
goto exit;
|
|
|
|
}
|
2018-12-04 00:34:03 +00:00
|
|
|
|
2021-06-16 20:31:53 +00:00
|
|
|
r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
|
2018-12-04 00:34:03 +00:00
|
|
|
if (r != 2)
|
drm/i915/psr: use struct drm_device based logging
Convert all the DRM_* logging macros to the struct drm_device based
macros to provide device specific logging.
No functional changes.
Generated using the following semantic patch, originally written by
Wambui Karuga <wambui.karugax@gmail.com>, with manual fixups on top:
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_NOTE(
+drm_notice(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cac03aba0a363c8f704035f1f771c73385235a35.1584714939.git.jani.nikula@intel.com
2020-03-20 14:36:37 +00:00
|
|
|
drm_dbg_kms(&i915->drm,
|
|
|
|
"Unable to read DP_PSR2_SU_X_GRANULARITY\n");
|
2018-12-04 00:34:03 +00:00
|
|
|
/*
|
|
|
|
* Spec says that if the value read is 0 the default granularity should
|
|
|
|
* be used instead.
|
|
|
|
*/
|
2021-06-16 20:31:53 +00:00
|
|
|
if (r != 2 || w == 0)
|
|
|
|
w = 4;
|
2018-12-04 00:34:03 +00:00
|
|
|
|
2021-06-16 20:31:53 +00:00
|
|
|
r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
|
|
|
|
if (r != 1) {
|
|
|
|
drm_dbg_kms(&i915->drm,
|
|
|
|
"Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
|
|
|
|
y = 4;
|
|
|
|
}
|
|
|
|
if (y == 0)
|
|
|
|
y = 1;
|
|
|
|
|
|
|
|
exit:
|
|
|
|
intel_dp->psr.su_w_granularity = w;
|
|
|
|
intel_dp->psr.su_y_granularity = y;
|
2018-12-04 00:34:03 +00:00
|
|
|
}
|
|
|
|
|
2023-11-08 07:23:00 +00:00
|
|
|
static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
|
|
|
u8 pr_dpcd = 0;
|
|
|
|
|
|
|
|
intel_dp->psr.sink_panel_replay_support = false;
|
|
|
|
drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);
|
|
|
|
|
|
|
|
if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
|
|
|
|
drm_dbg_kms(&i915->drm,
|
|
|
|
"Panel replay is not supported by panel\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
drm_dbg_kms(&i915->drm,
|
|
|
|
"Panel replay is supported by panel\n");
|
|
|
|
intel_dp->psr.sink_panel_replay_support = true;
|
|
|
|
}
|
|
|
|
|
2023-11-08 07:22:59 +00:00
|
|
|
static void _psr_init_dpcd(struct intel_dp *intel_dp)
|
2018-02-23 22:15:17 +00:00
|
|
|
{
|
2023-11-08 07:22:59 +00:00
|
|
|
struct drm_i915_private *i915 =
|
2018-02-23 22:15:17 +00:00
|
|
|
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
|
|
|
|
|
2023-11-08 07:22:59 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
|
drm/i915/psr: automatic conversion to drm_device based logging macros.
Converts instances of the printk based logging macros to the struct
drm_device based logging macros in i915/display/intel_psr.c using the
following coccinelle script that transforms based on the existence of a
drm_i915_private device pointer:
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200130083229.12889-11-wambui.karugax@gmail.com
2020-01-30 08:32:27 +00:00
|
|
|
intel_dp->psr_dpcd[0]);
|
2018-05-11 19:51:41 +00:00
|
|
|
|
2020-09-15 16:49:13 +00:00
|
|
|
if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
|
2023-11-08 07:22:59 +00:00
|
|
|
drm_dbg_kms(&i915->drm,
|
drm/i915/psr: automatic conversion to drm_device based logging macros.
Converts instances of the printk based logging macros to the struct
drm_device based logging macros in i915/display/intel_psr.c using the
following coccinelle script that transforms based on the existence of a
drm_i915_private device pointer:
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200130083229.12889-11-wambui.karugax@gmail.com
2020-01-30 08:32:27 +00:00
|
|
|
"PSR support not currently available for this panel\n");
|
2018-12-04 00:33:55 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-05-11 19:51:41 +00:00
|
|
|
if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
|
2023-11-08 07:22:59 +00:00
|
|
|
drm_dbg_kms(&i915->drm,
|
drm/i915/psr: automatic conversion to drm_device based logging macros.
Converts instances of the printk based logging macros to the struct
drm_device based logging macros in i915/display/intel_psr.c using the
following coccinelle script that transforms based on the existence of a
drm_i915_private device pointer:
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200130083229.12889-11-wambui.karugax@gmail.com
2020-01-30 08:32:27 +00:00
|
|
|
"Panel lacks power state control, PSR cannot be enabled\n");
|
2018-05-11 19:51:41 +00:00
|
|
|
return;
|
|
|
|
}
|
2018-12-04 00:33:55 +00:00
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
intel_dp->psr.sink_support = true;
|
|
|
|
intel_dp->psr.sink_sync_latency =
|
2018-05-25 03:30:47 +00:00
|
|
|
intel_dp_get_sink_sync_latency(intel_dp);
|
2018-02-23 22:15:17 +00:00
|
|
|
|
2023-11-08 07:22:59 +00:00
|
|
|
if (DISPLAY_VER(i915) >= 9 &&
|
|
|
|
intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
|
2018-05-11 19:51:45 +00:00
|
|
|
bool y_req = intel_dp->psr_dpcd[1] &
|
|
|
|
DP_PSR2_SU_Y_COORDINATE_REQUIRED;
|
|
|
|
bool alpm = intel_dp_get_alpm_status(intel_dp);
|
|
|
|
|
2018-03-28 22:30:40 +00:00
|
|
|
/*
|
|
|
|
* All panels that supports PSR version 03h (PSR2 +
|
|
|
|
* Y-coordinate) can handle Y-coordinates in VSC but we are
|
|
|
|
* only sure that it is going to be used when required by the
|
|
|
|
* panel. This way panel is capable to do selective update
|
|
|
|
* without a aux frame sync.
|
|
|
|
*
|
|
|
|
* To support PSR version 02h and PSR version 03h without
|
|
|
|
* Y-coordinate requirement panels we would need to enable
|
|
|
|
* GTC first.
|
|
|
|
*/
|
2021-02-04 13:40:14 +00:00
|
|
|
intel_dp->psr.sink_psr2_support = y_req && alpm;
|
2023-11-08 07:22:59 +00:00
|
|
|
drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
|
2021-02-04 13:40:14 +00:00
|
|
|
intel_dp->psr.sink_psr2_support ? "" : "not ");
|
2023-11-08 07:22:59 +00:00
|
|
|
}
|
|
|
|
}
|
2018-02-23 22:15:17 +00:00
|
|
|
|
2023-11-08 07:22:59 +00:00
|
|
|
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
|
|
|
|
{
|
2023-11-08 07:23:00 +00:00
|
|
|
_panel_replay_init_dpcd(intel_dp);
|
|
|
|
|
2023-11-08 07:22:59 +00:00
|
|
|
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
|
|
|
|
sizeof(intel_dp->psr_dpcd));
|
|
|
|
|
|
|
|
if (intel_dp->psr_dpcd[0])
|
|
|
|
_psr_init_dpcd(intel_dp);
|
|
|
|
|
|
|
|
if (intel_dp->psr.sink_psr2_support) {
|
|
|
|
intel_dp->psr.colorimetry_support =
|
|
|
|
intel_dp_get_colorimetry_status(intel_dp);
|
|
|
|
intel_dp_get_su_granularity(intel_dp);
|
2018-02-23 22:15:17 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-06-09 14:13:56 +00:00
|
|
|
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
|
|
|
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
|
|
|
|
u32 aux_clock_divider, aux_ctl;
|
|
|
|
/* write DP_SET_POWER=D0 */
|
|
|
|
static const u8 aux_msg[] = {
|
|
|
|
[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
|
|
|
|
[1] = (DP_SET_POWER >> 8) & 0xff,
|
|
|
|
[2] = DP_SET_POWER & 0xff,
|
|
|
|
[3] = 1 - 1,
|
|
|
|
[4] = DP_SET_POWER_D0,
|
|
|
|
};
|
|
|
|
int i;
|
|
|
|
|
|
|
|
BUILD_BUG_ON(sizeof(aux_msg) > 20);
|
|
|
|
for (i = 0; i < sizeof(aux_msg); i += 4)
|
|
|
|
intel_de_write(dev_priv,
|
|
|
|
psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
|
|
|
|
intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
|
|
|
|
|
|
|
|
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
|
|
|
|
|
|
|
|
/* Start with bits set for DDI_AUX_CTL register */
|
|
|
|
aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
|
|
|
|
aux_clock_divider);
|
|
|
|
|
|
|
|
/* Select only valid bits for SRD_AUX_CTL */
|
|
|
|
aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
|
|
|
|
EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
|
|
|
|
EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
|
|
|
|
EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
|
|
|
|
|
|
|
|
intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
|
|
|
|
aux_ctl);
|
|
|
|
}
|
|
|
|
|
2018-06-26 05:25:36 +00:00
|
|
|
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
|
2018-03-13 03:46:45 +00:00
|
|
|
{
|
2018-08-27 22:30:21 +00:00
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
2018-03-28 22:30:45 +00:00
|
|
|
u8 dpcd_val = DP_PSR_ENABLE;
|
2018-03-13 03:46:45 +00:00
|
|
|
|
2023-11-08 07:23:02 +00:00
|
|
|
if (intel_dp->psr.panel_replay_enabled)
|
|
|
|
return;
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
if (intel_dp->psr.psr2_enabled) {
|
2023-11-08 07:23:02 +00:00
|
|
|
/* Enable ALPM at sink for psr2 */
|
2018-05-11 19:51:45 +00:00
|
|
|
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
|
2019-11-28 01:48:50 +00:00
|
|
|
DP_ALPM_ENABLE |
|
|
|
|
DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
|
|
|
|
|
2018-12-04 00:33:58 +00:00
|
|
|
dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
|
2018-12-04 00:33:56 +00:00
|
|
|
} else {
|
2021-02-04 13:40:14 +00:00
|
|
|
if (intel_dp->psr.link_standby)
|
2018-12-04 00:33:56 +00:00
|
|
|
dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
|
2018-12-04 00:33:57 +00:00
|
|
|
|
2021-03-20 04:42:42 +00:00
|
|
|
if (DISPLAY_VER(dev_priv) >= 8)
|
2018-12-04 00:33:57 +00:00
|
|
|
dpcd_val |= DP_PSR_CRC_VERIFICATION;
|
2018-05-11 19:51:45 +00:00
|
|
|
}
|
|
|
|
|
2021-06-16 20:31:56 +00:00
|
|
|
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
|
|
|
|
dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
|
|
|
|
|
2023-11-06 11:42:28 +00:00
|
|
|
if (intel_dp->psr.entry_setup_frames > 0)
|
|
|
|
dpcd_val |= DP_PSR_FRAME_CAPTURE;
|
|
|
|
|
2018-03-28 22:30:45 +00:00
|
|
|
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
|
2016-05-18 16:47:14 +00:00
|
|
|
|
2018-03-13 03:46:46 +00:00
|
|
|
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
|
2014-11-14 16:52:28 +00:00
|
|
|
}
|
|
|
|
|
2019-03-12 19:57:42 +00:00
|
|
|
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
|
2014-11-14 16:52:28 +00:00
|
|
|
{
|
2022-05-10 10:42:39 +00:00
|
|
|
struct intel_connector *connector = intel_dp->attached_connector;
|
2018-08-27 22:30:21 +00:00
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
2019-03-12 19:57:42 +00:00
|
|
|
u32 val = 0;
|
2016-02-01 20:02:07 +00:00
|
|
|
|
2021-03-20 04:42:42 +00:00
|
|
|
if (DISPLAY_VER(dev_priv) >= 11)
|
2023-04-11 19:14:24 +00:00
|
|
|
val |= EDP_PSR_TP4_TIME_0us;
|
2019-03-12 19:57:43 +00:00
|
|
|
|
2023-10-24 12:40:50 +00:00
|
|
|
if (dev_priv->display.params.psr_safest_params) {
|
2020-05-20 21:27:56 +00:00
|
|
|
val |= EDP_PSR_TP1_TIME_2500us;
|
|
|
|
val |= EDP_PSR_TP2_TP3_TIME_2500us;
|
|
|
|
goto check_tp3_sel;
|
|
|
|
}
|
|
|
|
|
2022-05-10 10:42:39 +00:00
|
|
|
if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
|
2019-03-12 19:57:42 +00:00
|
|
|
val |= EDP_PSR_TP1_TIME_0us;
|
2022-05-10 10:42:39 +00:00
|
|
|
else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
|
2016-05-18 16:47:11 +00:00
|
|
|
val |= EDP_PSR_TP1_TIME_100us;
|
2022-05-10 10:42:39 +00:00
|
|
|
else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
|
2018-05-22 09:27:23 +00:00
|
|
|
val |= EDP_PSR_TP1_TIME_500us;
|
2016-05-18 16:47:11 +00:00
|
|
|
else
|
2018-05-22 09:27:23 +00:00
|
|
|
val |= EDP_PSR_TP1_TIME_2500us;
|
2016-05-18 16:47:11 +00:00
|
|
|
|
2022-05-10 10:42:39 +00:00
|
|
|
if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
|
2019-03-12 19:57:42 +00:00
|
|
|
val |= EDP_PSR_TP2_TP3_TIME_0us;
|
2022-05-10 10:42:39 +00:00
|
|
|
else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
|
2016-05-18 16:47:11 +00:00
|
|
|
val |= EDP_PSR_TP2_TP3_TIME_100us;
|
2022-05-10 10:42:39 +00:00
|
|
|
else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
|
2018-05-22 09:27:23 +00:00
|
|
|
val |= EDP_PSR_TP2_TP3_TIME_500us;
|
2016-05-18 16:47:11 +00:00
|
|
|
else
|
2018-05-22 09:27:23 +00:00
|
|
|
val |= EDP_PSR_TP2_TP3_TIME_2500us;
|
2016-05-18 16:47:11 +00:00
|
|
|
|
2023-06-09 14:14:02 +00:00
|
|
|
/*
|
|
|
|
* WA 0479: hsw,bdw
|
|
|
|
* "Do not skip both TP1 and TP2/TP3"
|
|
|
|
*/
|
|
|
|
if (DISPLAY_VER(dev_priv) < 9 &&
|
|
|
|
connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
|
|
|
|
connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
|
|
|
|
val |= EDP_PSR_TP2_TP3_TIME_100us;
|
|
|
|
|
2020-05-20 21:27:56 +00:00
|
|
|
check_tp3_sel:
|
2021-09-29 16:24:04 +00:00
|
|
|
if (intel_dp_source_supports_tps3(dev_priv) &&
|
2016-05-18 16:47:11 +00:00
|
|
|
drm_dp_tps3_supported(intel_dp->dpcd))
|
2023-04-11 19:14:24 +00:00
|
|
|
val |= EDP_PSR_TP_TP1_TP3;
|
2016-05-18 16:47:11 +00:00
|
|
|
else
|
2023-04-11 19:14:24 +00:00
|
|
|
val |= EDP_PSR_TP_TP1_TP2;
|
2016-05-18 16:47:11 +00:00
|
|
|
|
2019-03-12 19:57:42 +00:00
|
|
|
return val;
|
|
|
|
}
|
|
|
|
|
2020-01-13 21:46:03 +00:00
|
|
|
static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
|
2019-03-12 19:57:42 +00:00
|
|
|
{
|
2022-05-10 10:42:39 +00:00
|
|
|
struct intel_connector *connector = intel_dp->attached_connector;
|
2019-03-12 19:57:42 +00:00
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
2020-01-13 21:46:03 +00:00
|
|
|
int idle_frames;
|
2019-03-12 19:57:42 +00:00
|
|
|
|
|
|
|
/* Let's use 6 as the minimum to cover all known cases including the
|
|
|
|
* off-by-one issue that HW has in some cases.
|
|
|
|
*/
|
2022-05-10 10:42:39 +00:00
|
|
|
idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
|
2021-02-04 13:40:14 +00:00
|
|
|
idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
|
2020-01-13 21:46:03 +00:00
|
|
|
|
drm/i915/display/psr: Make WARN* drm specific where drm_priv ptr is available
drm specific WARN* calls include device information in the
backtrace, so we know what device the warnings originate from.
Covert all the calls of WARN* with device specific drm_WARN*
variants in functions where drm_i915_private struct pointer is readily
available.
The conversion was done automatically with below coccinelle semantic
patch.
@rule1@
identifier func, T;
@@
func(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-WARN(
+drm_WARN(&T->drm,
...)
|
-WARN_ON(
+drm_WARN_ON(&T->drm,
...)
|
-WARN_ONCE(
+drm_WARN_ONCE(&T->drm,
...)
|
-WARN_ON_ONCE(
+drm_WARN_ON_ONCE(&T->drm,
...)
)
...+>
}
@rule2@
identifier func, T;
@@
func(struct drm_i915_private *T,...) {
<+...
(
-WARN(
+drm_WARN(&T->drm,
...)
|
-WARN_ON(
+drm_WARN_ON(&T->drm,
...)
|
-WARN_ONCE(
+drm_WARN_ONCE(&T->drm,
...)
|
-WARN_ON_ONCE(
+drm_WARN_ON_ONCE(&T->drm,
...)
)
...+>
}
Signed-off-by: Pankaj Bharadiya <pankaj.laxminarayan.bharadiya@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200128181603.27767-17-pankaj.laxminarayan.bharadiya@intel.com
2020-01-28 18:15:58 +00:00
|
|
|
if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
|
2020-01-13 21:46:03 +00:00
|
|
|
idle_frames = 0xf;
|
|
|
|
|
|
|
|
return idle_frames;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hsw_activate_psr1(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
2023-04-11 19:14:29 +00:00
|
|
|
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
|
2020-01-13 21:46:03 +00:00
|
|
|
u32 max_sleep_time = 0x1f;
|
|
|
|
u32 val = EDP_PSR_ENABLE;
|
|
|
|
|
2023-04-11 19:14:24 +00:00
|
|
|
val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
|
2019-03-12 19:57:42 +00:00
|
|
|
|
2023-10-10 09:52:33 +00:00
|
|
|
if (DISPLAY_VER(dev_priv) < 20)
|
|
|
|
val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
|
|
|
|
|
2019-03-12 19:57:42 +00:00
|
|
|
if (IS_HASWELL(dev_priv))
|
|
|
|
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
if (intel_dp->psr.link_standby)
|
2019-03-12 19:57:42 +00:00
|
|
|
val |= EDP_PSR_LINK_STANDBY;
|
|
|
|
|
|
|
|
val |= intel_psr1_get_tp_time(intel_dp);
|
|
|
|
|
2021-03-20 04:42:42 +00:00
|
|
|
if (DISPLAY_VER(dev_priv) >= 8)
|
2018-06-26 20:16:44 +00:00
|
|
|
val |= EDP_PSR_CRC_ENABLE;
|
|
|
|
|
2023-11-06 11:42:28 +00:00
|
|
|
if (DISPLAY_VER(dev_priv) >= 20)
|
|
|
|
val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
|
|
|
|
|
2023-06-09 14:13:54 +00:00
|
|
|
intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
|
2023-04-11 19:14:25 +00:00
|
|
|
~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
|
drm/i915/psr: fix blank screen issue for psr2
Psr1 and psr2 are mutually exclusive,ie when psr2 is enabled,
psr1 should be disabled.When psr2 is exited , bit 31 of reg
PSR2_CTL must be set to 0 but currently bit 31 of SRD_CTL
(psr1 control register)is set to 0.
Also ,PSR2_IDLE state is looked up from SRD_STATUS(psr1 register)
instead of PSR2_STATUS register, which has wrong data, resulting
in blankscreen.
hsw_enable_source is split into hsw_enable_source_psr1 and
hsw_enable_source_psr2 for easier code review and maintenance,
as suggested by rodrigo and jim.
v2: (Rodrigo)
- Rename hsw_enable_source_psr* to intel_enable_source_psr*
v3: (Rodrigo)
- In hsw_psr_disable ,
1) for psr active case, handle psr2 followed by psr1.
2) psr inactive case, handle psr2 followed by psr1
v4:(Rodrigo)
- move psr2 restriction(32X20) to match_conditions function
returning false and fully blocking PSR to a new patch before
this one.
v5: in source_psr2, removed val = EDP_PSR_ENABLE
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Jim Bride <jim.bride@linux.intel.com>
Signed-off-by: Vathsala Nagaraju <vathsala.nagaraju@intel.com>
Signed-off-by: Patil Deepti <deepti.patil@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1484244059-9201-1-git-send-email-vathsala.nagaraju@intel.com
2017-01-12 18:00:59 +00:00
|
|
|
}
|
2016-05-18 16:47:11 +00:00
|
|
|
|
2020-05-20 21:27:56 +00:00
|
|
|
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
|
drm/i915/psr: fix blank screen issue for psr2
Psr1 and psr2 are mutually exclusive,ie when psr2 is enabled,
psr1 should be disabled.When psr2 is exited , bit 31 of reg
PSR2_CTL must be set to 0 but currently bit 31 of SRD_CTL
(psr1 control register)is set to 0.
Also ,PSR2_IDLE state is looked up from SRD_STATUS(psr1 register)
instead of PSR2_STATUS register, which has wrong data, resulting
in blankscreen.
hsw_enable_source is split into hsw_enable_source_psr1 and
hsw_enable_source_psr2 for easier code review and maintenance,
as suggested by rodrigo and jim.
v2: (Rodrigo)
- Rename hsw_enable_source_psr* to intel_enable_source_psr*
v3: (Rodrigo)
- In hsw_psr_disable ,
1) for psr active case, handle psr2 followed by psr1.
2) psr inactive case, handle psr2 followed by psr1
v4:(Rodrigo)
- move psr2 restriction(32X20) to match_conditions function
returning false and fully blocking PSR to a new patch before
this one.
v5: in source_psr2, removed val = EDP_PSR_ENABLE
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Jim Bride <jim.bride@linux.intel.com>
Signed-off-by: Vathsala Nagaraju <vathsala.nagaraju@intel.com>
Signed-off-by: Patil Deepti <deepti.patil@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1484244059-9201-1-git-send-email-vathsala.nagaraju@intel.com
2017-01-12 18:00:59 +00:00
|
|
|
{
|
2022-05-10 10:42:39 +00:00
|
|
|
struct intel_connector *connector = intel_dp->attached_connector;
|
2018-08-27 22:30:21 +00:00
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
2020-05-20 21:27:56 +00:00
|
|
|
u32 val = 0;
|
2017-09-26 09:59:13 +00:00
|
|
|
|
2023-10-24 12:40:50 +00:00
|
|
|
if (dev_priv->display.params.psr_safest_params)
|
2020-05-20 21:27:56 +00:00
|
|
|
return EDP_PSR2_TP2_TIME_2500us;
|
2016-05-18 16:47:11 +00:00
|
|
|
|
2022-05-10 10:42:39 +00:00
|
|
|
if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
|
|
|
|
connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
|
2018-05-22 09:27:23 +00:00
|
|
|
val |= EDP_PSR2_TP2_TIME_50us;
|
2022-05-10 10:42:39 +00:00
|
|
|
else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
|
2018-05-22 09:27:23 +00:00
|
|
|
val |= EDP_PSR2_TP2_TIME_100us;
|
2022-05-10 10:42:39 +00:00
|
|
|
else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
|
2018-05-22 09:27:23 +00:00
|
|
|
val |= EDP_PSR2_TP2_TIME_500us;
|
2016-05-18 16:47:11 +00:00
|
|
|
else
|
2018-05-22 09:27:23 +00:00
|
|
|
val |= EDP_PSR2_TP2_TIME_2500us;
|
2015-04-02 05:32:44 +00:00
|
|
|
|
2020-05-20 21:27:56 +00:00
|
|
|
return val;
|
|
|
|
}
|
|
|
|
|
2023-03-29 15:07:01 +00:00
|
|
|
static int psr2_block_count_lines(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
return intel_dp->psr.io_wake_lines < 9 &&
|
|
|
|
intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Block count in block units: one block is 4 lines. */
static int psr2_block_count(struct intel_dp *intel_dp)
{
	return psr2_block_count_lines(intel_dp) / 4;
}
|
|
|
|
|
2023-11-06 11:42:28 +00:00
|
|
|
static u8 frames_before_su_entry(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
u8 frames_before_su_entry;
|
|
|
|
|
|
|
|
frames_before_su_entry = max_t(u8,
|
|
|
|
intel_dp->psr.sink_sync_latency + 1,
|
|
|
|
2);
|
|
|
|
|
|
|
|
/* Entry setup frames must be at least 1 less than frames before SU entry */
|
|
|
|
if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
|
|
|
|
frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
|
|
|
|
|
|
|
|
return frames_before_su_entry;
|
|
|
|
}
|
|
|
|
|
2023-11-08 07:23:02 +00:00
|
|
|
static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
|
|
|
|
|
|
|
intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
|
|
|
|
0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
|
|
|
|
|
|
|
|
intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
|
|
|
|
TRANS_DP2_PANEL_REPLAY_ENABLE);
|
|
|
|
}
|
|
|
|
|
2020-05-20 21:27:56 +00:00
|
|
|
static void hsw_activate_psr2(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
2023-04-11 19:14:29 +00:00
|
|
|
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
|
2021-06-25 23:55:59 +00:00
|
|
|
u32 val = EDP_PSR2_ENABLE;
|
2023-11-06 11:42:28 +00:00
|
|
|
u32 psr_val = 0;
|
2021-06-25 23:55:59 +00:00
|
|
|
|
2023-04-11 19:14:24 +00:00
|
|
|
val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
|
2020-05-20 21:27:56 +00:00
|
|
|
|
2023-11-27 14:50:28 +00:00
|
|
|
if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
|
2021-06-25 23:55:59 +00:00
|
|
|
val |= EDP_SU_TRACK_ENABLE;
|
2020-05-20 21:27:56 +00:00
|
|
|
|
2023-11-27 14:50:28 +00:00
|
|
|
if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
|
2020-05-20 21:27:56 +00:00
|
|
|
val |= EDP_Y_COORDINATE_ENABLE;
|
|
|
|
|
2023-11-06 11:42:28 +00:00
|
|
|
val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
|
|
|
|
|
2020-05-20 21:27:56 +00:00
|
|
|
val |= intel_psr2_get_tp_time(intel_dp);
|
|
|
|
|
2023-02-21 08:53:04 +00:00
|
|
|
if (DISPLAY_VER(dev_priv) >= 12) {
|
2023-03-29 15:07:01 +00:00
|
|
|
if (psr2_block_count(intel_dp) > 2)
|
2023-02-21 08:53:04 +00:00
|
|
|
val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
|
2023-03-29 15:07:01 +00:00
|
|
|
else
|
|
|
|
val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
|
2023-02-21 08:53:04 +00:00
|
|
|
}
|
|
|
|
|
2021-07-13 00:38:49 +00:00
|
|
|
/* Wa_22012278275:adl-p */
|
2023-08-01 13:53:40 +00:00
|
|
|
if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
|
2021-06-16 20:31:54 +00:00
|
|
|
static const u8 map[] = {
|
|
|
|
2, /* 5 lines */
|
|
|
|
1, /* 6 lines */
|
|
|
|
0, /* 7 lines */
|
|
|
|
3, /* 8 lines */
|
|
|
|
6, /* 9 lines */
|
|
|
|
5, /* 10 lines */
|
|
|
|
4, /* 11 lines */
|
|
|
|
7, /* 12 lines */
|
|
|
|
};
|
|
|
|
/*
|
|
|
|
* Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
|
|
|
|
* comments bellow for more information
|
|
|
|
*/
|
2023-04-11 19:14:24 +00:00
|
|
|
int tmp;
|
2021-06-16 20:31:54 +00:00
|
|
|
|
2023-02-21 08:53:04 +00:00
|
|
|
tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
|
2023-04-11 19:14:24 +00:00
|
|
|
val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
|
2021-06-16 20:31:54 +00:00
|
|
|
|
2023-02-21 08:53:04 +00:00
|
|
|
tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
|
2023-04-11 19:14:24 +00:00
|
|
|
val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
|
2021-06-16 20:31:54 +00:00
|
|
|
} else if (DISPLAY_VER(dev_priv) >= 12) {
|
2023-02-21 08:53:04 +00:00
|
|
|
val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
|
|
|
|
val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
|
2021-03-20 04:42:42 +00:00
|
|
|
} else if (DISPLAY_VER(dev_priv) >= 9) {
|
2023-02-21 08:53:04 +00:00
|
|
|
val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
|
|
|
|
val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
|
2020-06-07 14:36:14 +00:00
|
|
|
}
|
|
|
|
|
2021-06-16 20:31:56 +00:00
|
|
|
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
|
|
|
|
val |= EDP_PSR2_SU_SDP_SCANLINE;
|
|
|
|
|
2023-11-06 11:42:28 +00:00
|
|
|
if (DISPLAY_VER(dev_priv) >= 20)
|
|
|
|
psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
if (intel_dp->psr.psr2_sel_fetch_enabled) {
|
2021-09-22 21:52:41 +00:00
|
|
|
u32 tmp;
|
|
|
|
|
2023-04-11 19:14:29 +00:00
|
|
|
tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
|
2021-09-22 21:52:41 +00:00
|
|
|
drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
|
2020-08-10 17:41:44 +00:00
|
|
|
} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
|
2023-04-11 19:14:29 +00:00
|
|
|
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
|
2020-08-10 17:41:44 +00:00
|
|
|
}
|
2020-08-10 17:41:43 +00:00
|
|
|
|
2019-03-14 23:01:13 +00:00
|
|
|
/*
|
2019-04-06 00:51:09 +00:00
|
|
|
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
|
|
|
|
* recommending keep this bit unset while PSR2 is enabled.
|
2019-03-14 23:01:13 +00:00
|
|
|
*/
|
2023-11-06 11:42:28 +00:00
|
|
|
intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
|
2019-03-14 23:01:13 +00:00
|
|
|
|
2023-04-11 19:14:29 +00:00
|
|
|
intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
|
2014-11-14 16:52:28 +00:00
|
|
|
}
|
|
|
|
|
2019-08-20 22:33:24 +00:00
|
|
|
static bool
|
2023-04-11 19:14:29 +00:00
|
|
|
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
|
2019-08-20 22:33:24 +00:00
|
|
|
{
|
2022-09-07 08:15:43 +00:00
|
|
|
if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
|
2023-04-11 19:14:29 +00:00
|
|
|
return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
|
2021-10-27 18:05:45 +00:00
|
|
|
else if (DISPLAY_VER(dev_priv) >= 12)
|
2023-04-11 19:14:29 +00:00
|
|
|
return cpu_transcoder == TRANSCODER_A;
|
2023-06-09 14:13:57 +00:00
|
|
|
else if (DISPLAY_VER(dev_priv) >= 9)
|
2023-04-11 19:14:29 +00:00
|
|
|
return cpu_transcoder == TRANSCODER_EDP;
|
2023-06-09 14:13:57 +00:00
|
|
|
else
|
|
|
|
return false;
|
2019-08-20 22:33:24 +00:00
|
|
|
}
|
|
|
|
|
2023-12-07 19:34:40 +00:00
|
|
|
static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
{
|
2023-12-07 19:34:40 +00:00
|
|
|
if (!crtc_state->hw.active)
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
return DIV_ROUND_UP(1000 * 1000,
|
2023-12-07 19:34:40 +00:00
|
|
|
drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
}
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
/*
 * Program the PSR2 deep-sleep idle-frame count for this sink.
 *
 * Performs a read-modify-write on the EDP_PSR2_CTL register of the
 * transcoder currently driving PSR (intel_dp->psr.transcoder), replacing
 * only the EDP_PSR2_IDLE_FRAMES field with @idle_frames.
 *
 * @intel_dp:    the PSR-capable DP encoder
 * @idle_frames: number of consecutive idle frames required before the
 *               hardware enters PSR2 deep sleep (0 effectively disables
 *               the deep-sleep wait, used on the DC3CO enable path)
 */
static void psr2_program_idle_frames(struct intel_dp *intel_dp,
				     u32 idle_frames)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	/* Only the idle-frames field is touched; other PSR2 bits are kept. */
	intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
		     EDP_PSR2_IDLE_FRAMES_MASK,
		     EDP_PSR2_IDLE_FRAMES(idle_frames));
}
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
{
|
2021-02-04 13:40:14 +00:00
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
|
|
|
|
|
|
|
psr2_program_idle_frames(intel_dp, 0);
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
|
|
|
|
}
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
{
|
2021-02-04 13:40:14 +00:00
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
|
|
|
|
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
|
2021-02-04 13:40:14 +00:00
|
|
|
psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
}
|
|
|
|
|
2020-02-05 21:49:45 +00:00
|
|
|
static void tgl_dc3co_disable_work(struct work_struct *work)
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
{
|
2021-02-04 13:40:14 +00:00
|
|
|
struct intel_dp *intel_dp =
|
|
|
|
container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
mutex_lock(&intel_dp->psr.lock);
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
/* If delayed work is pending, it is not idle */
|
2021-02-04 13:40:14 +00:00
|
|
|
if (delayed_work_pending(&intel_dp->psr.dc3co_work))
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
goto unlock;
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
tgl_psr2_disable_dc3co(intel_dp);
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
unlock:
|
2021-02-04 13:40:14 +00:00
|
|
|
mutex_unlock(&intel_dp->psr.lock);
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
}
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
{
|
2021-05-19 00:06:19 +00:00
|
|
|
if (!intel_dp->psr.dc3co_exitline)
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
return;
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
cancel_delayed_work(&intel_dp->psr.dc3co_work);
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
/* Before PSR2 exit disallow dc3co*/
|
2021-02-04 13:40:14 +00:00
|
|
|
tgl_psr2_disable_dc3co(intel_dp);
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
}
|
|
|
|
|
2021-05-24 21:48:04 +00:00
|
|
|
static bool
|
|
|
|
dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
|
|
|
|
struct intel_crtc_state *crtc_state)
|
|
|
|
{
|
|
|
|
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
|
|
|
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
|
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
|
|
|
enum port port = dig_port->base.port;
|
|
|
|
|
2022-09-07 08:15:43 +00:00
|
|
|
if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
|
2021-05-24 21:48:04 +00:00
|
|
|
return pipe <= PIPE_B && port <= PORT_B;
|
|
|
|
else
|
|
|
|
return pipe == PIPE_A && port == PORT_A;
|
|
|
|
}
|
|
|
|
|
2020-01-22 18:26:17 +00:00
|
|
|
static void
|
|
|
|
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
|
|
|
|
struct intel_crtc_state *crtc_state)
|
|
|
|
{
|
|
|
|
const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
|
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
2023-03-01 12:29:40 +00:00
|
|
|
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
|
2020-01-22 18:26:17 +00:00
|
|
|
u32 exit_scanlines;
|
|
|
|
|
2021-04-01 17:02:37 +00:00
|
|
|
/*
|
|
|
|
* FIXME: Due to the changed sequence of activating/deactivating DC3CO,
|
|
|
|
* disable DC3CO until the changed dc3co activating/deactivating sequence
|
|
|
|
* is applied. B.Specs:49196
|
|
|
|
*/
|
|
|
|
return;
|
|
|
|
|
2021-02-22 21:30:06 +00:00
|
|
|
/*
|
|
|
|
* DMC's DC3CO exit mechanism has an issue with Selective Fecth
|
|
|
|
* TODO: when the issue is addressed, this restriction should be removed.
|
|
|
|
*/
|
|
|
|
if (crtc_state->enable_psr2_sel_fetch)
|
|
|
|
return;
|
|
|
|
|
2023-03-01 12:29:40 +00:00
|
|
|
if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
|
2020-01-22 18:26:17 +00:00
|
|
|
return;
|
|
|
|
|
2021-05-24 21:48:04 +00:00
|
|
|
if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
|
2020-01-22 18:26:17 +00:00
|
|
|
return;
|
|
|
|
|
2021-07-13 00:38:49 +00:00
|
|
|
/* Wa_16011303918:adl-p */
|
2023-08-01 13:53:40 +00:00
|
|
|
if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
|
2021-06-16 20:31:57 +00:00
|
|
|
return;
|
|
|
|
|
2020-01-22 18:26:17 +00:00
|
|
|
/*
|
|
|
|
* DC3CO Exit time 200us B.Spec 49196
|
|
|
|
* PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
|
|
|
|
*/
|
|
|
|
exit_scanlines =
|
|
|
|
intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
|
|
|
|
|
drm/i915/display/psr: Make WARN* drm specific where drm_priv ptr is available
drm specific WARN* calls include device information in the
backtrace, so we know what device the warnings originate from.
Covert all the calls of WARN* with device specific drm_WARN*
variants in functions where drm_i915_private struct pointer is readily
available.
The conversion was done automatically with below coccinelle semantic
patch.
@rule1@
identifier func, T;
@@
func(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-WARN(
+drm_WARN(&T->drm,
...)
|
-WARN_ON(
+drm_WARN_ON(&T->drm,
...)
|
-WARN_ONCE(
+drm_WARN_ONCE(&T->drm,
...)
|
-WARN_ON_ONCE(
+drm_WARN_ON_ONCE(&T->drm,
...)
)
...+>
}
@rule2@
identifier func, T;
@@
func(struct drm_i915_private *T,...) {
<+...
(
-WARN(
+drm_WARN(&T->drm,
...)
|
-WARN_ON(
+drm_WARN_ON(&T->drm,
...)
|
-WARN_ONCE(
+drm_WARN_ONCE(&T->drm,
...)
|
-WARN_ON_ONCE(
+drm_WARN_ON_ONCE(&T->drm,
...)
)
...+>
}
Signed-off-by: Pankaj Bharadiya <pankaj.laxminarayan.bharadiya@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200128181603.27767-17-pankaj.laxminarayan.bharadiya@intel.com
2020-01-28 18:15:58 +00:00
|
|
|
if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
|
2020-01-22 18:26:17 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
|
|
|
|
}
|
|
|
|
|
2020-08-10 17:41:43 +00:00
|
|
|
static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
|
|
|
|
struct intel_crtc_state *crtc_state)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
|
|
|
|
2023-10-24 12:40:50 +00:00
|
|
|
if (!dev_priv->display.params.enable_psr2_sel_fetch &&
|
2021-02-09 20:50:36 +00:00
|
|
|
intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
|
2020-08-10 17:41:43 +00:00
|
|
|
drm_dbg_kms(&dev_priv->drm,
|
|
|
|
"PSR2 sel fetch not enabled, disabled by parameter\n");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (crtc_state->uapi.async_flip) {
|
|
|
|
drm_dbg_kms(&dev_priv->drm,
|
|
|
|
"PSR2 sel fetch not enabled, async flip enabled\n");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return crtc_state->enable_psr2_sel_fetch = true;
|
|
|
|
}
|
|
|
|
|
2021-06-16 20:31:53 +00:00
|
|
|
/*
 * Validate that the sink's PSR2 selective-update granularity is compatible
 * with the mode and (if enabled) DSC slicing. On success, stores the chosen
 * Y granularity in crtc_state->su_y_granularity.
 */
static bool psr2_granularity_check(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	u16 y_granularity = 0;

	/* PSR2 HW only send full lines so we only need to validate the width */
	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
		return false;

	/* Height must also be an exact multiple of the sink's Y granularity. */
	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
		return false;

	/* HW tracking is only aligned to 4 lines */
	if (!crtc_state->enable_psr2_sel_fetch)
		return intel_dp->psr.su_y_granularity == 4;

	/*
	 * adl_p and mtl platforms have 1 line granularity.
	 * For other platforms with SW tracking we can adjust the y coordinates
	 * to match sink requirement if multiple of 4.
	 */
	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		y_granularity = intel_dp->psr.su_y_granularity;
	else if (intel_dp->psr.su_y_granularity <= 2)
		y_granularity = 4;
	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
		y_granularity = intel_dp->psr.su_y_granularity;

	/* y_granularity == 0 here means no compatible granularity was found. */
	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
		return false;

	/* With DSC, each SU region must also align to the DSC slice height. */
	if (crtc_state->dsc.compression_enable &&
	    vdsc_cfg->slice_height % y_granularity)
		return false;

	crtc_state->su_y_granularity = y_granularity;
	return true;
}
|
|
|
|
|
2021-06-16 20:31:56 +00:00
|
|
|
static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
|
|
|
|
struct intel_crtc_state *crtc_state)
|
|
|
|
{
|
|
|
|
const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
|
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
|
|
|
u32 hblank_total, hblank_ns, req_ns;
|
|
|
|
|
|
|
|
hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
|
|
|
|
hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
|
|
|
|
|
2022-09-05 10:23:54 +00:00
|
|
|
/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
|
|
|
|
req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
|
2021-06-16 20:31:56 +00:00
|
|
|
|
|
|
|
if ((hblank_ns - req_ns) > 100)
|
|
|
|
return true;
|
|
|
|
|
2022-09-05 10:23:55 +00:00
|
|
|
/* Not supported <13 / Wa_22012279113:adl-p */
|
2023-11-27 14:50:28 +00:00
|
|
|
if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
|
2021-06-16 20:31:56 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
crtc_state->req_psr2_sdp_prior_scanline = true;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2023-02-21 08:53:04 +00:00
|
|
|
/*
 * Compute the IO and fast wake line counts for PSR2 and store them in
 * intel_dp->psr. Returns false when the required wake times cannot fit
 * within the HW's maximum number of wake lines for this platform.
 */
static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
				     struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
	u8 max_wake_lines;

	/* Wake times (us) and HW line limits differ per display version. */
	if (DISPLAY_VER(i915) >= 12) {
		io_wake_time = 42;
		/*
		 * According to Bspec it's 42us, but based on testing
		 * it is not enough -> use 45 us.
		 */
		fast_wake_time = 45;
		max_wake_lines = 12;
	} else {
		io_wake_time = 50;
		fast_wake_time = 32;
		max_wake_lines = 8;
	}

	/* Convert the wake times into scanlines of the current mode. */
	io_wake_lines = intel_usecs_to_scanlines(
		&crtc_state->hw.adjusted_mode, io_wake_time);
	fast_wake_lines = intel_usecs_to_scanlines(
		&crtc_state->hw.adjusted_mode, fast_wake_time);

	if (io_wake_lines > max_wake_lines ||
	    fast_wake_lines > max_wake_lines)
		return false;

	/* Debug knob: program the most conservative (largest) values. */
	if (i915->display.params.psr_safest_params)
		io_wake_lines = fast_wake_lines = max_wake_lines;

	/* According to Bspec lower limit should be set as 7 lines. */
	intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
	intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);

	return true;
}
|
|
|
|
|
2023-11-13 09:37:37 +00:00
|
|
|
static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
|
|
|
|
const struct drm_display_mode *adjusted_mode)
|
2023-11-06 11:42:28 +00:00
|
|
|
{
|
|
|
|
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
|
|
|
int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
|
2023-11-13 09:37:37 +00:00
|
|
|
int entry_setup_frames = 0;
|
2023-11-06 11:42:28 +00:00
|
|
|
|
|
|
|
if (psr_setup_time < 0) {
|
|
|
|
drm_dbg_kms(&i915->drm,
|
|
|
|
"PSR condition failed: Invalid PSR setup time (0x%02x)\n",
|
|
|
|
intel_dp->psr_dpcd[1]);
|
|
|
|
return -ETIME;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
|
|
|
|
adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
|
|
|
|
if (DISPLAY_VER(i915) >= 20) {
|
|
|
|
/* setup entry frames can be up to 3 frames */
|
|
|
|
entry_setup_frames = 1;
|
|
|
|
drm_dbg_kms(&i915->drm,
|
|
|
|
"PSR setup entry frames %d\n",
|
|
|
|
entry_setup_frames);
|
|
|
|
} else {
|
|
|
|
drm_dbg_kms(&i915->drm,
|
|
|
|
"PSR condition failed: PSR setup time (%d us) too long\n",
|
|
|
|
psr_setup_time);
|
|
|
|
return -ETIME;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return entry_setup_frames;
|
|
|
|
}
|
|
|
|
|
2018-02-27 21:29:13 +00:00
|
|
|
/*
 * Check every platform, workaround, mode and sink constraint that gates PSR2.
 * Returns true when PSR2 can be enabled for this crtc state; on the
 * "unsupported" paths it also clears enable_psr2_sel_fetch, since selective
 * fetch may already have been marked enabled by an earlier check.
 */
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;

	if (!intel_dp->psr.sink_psr2_support)
		return false;

	/* JSL and EHL only supports eDP 1.3 */
	if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
		return false;
	}

	/* Wa_16011181250 */
	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
	    IS_DG2(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
		return false;
	}

	/* Early ADL-P steppings have non-functional PSR2. */
	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
		return false;
	}

	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not supported in transcoder %s\n",
			    transcoder_name(crtc_state->cpu_transcoder));
		return false;
	}

	if (!psr2_global_enabled(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
		return false;
	}

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc.compression_enable &&
	    (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (crtc_state->crc_enabled) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	/* Per-display-version maximum PSR2 resolution and pipe bpp. */
	if (DISPLAY_VER(dev_priv) >= 12) {
		psr_max_h = 5120;
		psr_max_v = 3200;
		max_bpp = 30;
	} else if (DISPLAY_VER(dev_priv) >= 10) {
		psr_max_h = 4096;
		psr_max_v = 2304;
		max_bpp = 24;
	} else if (DISPLAY_VER(dev_priv) == 9) {
		psr_max_h = 3640;
		psr_max_v = 2304;
		max_bpp = 24;
	}

	if (crtc_state->pipe_bpp > max_bpp) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
			    crtc_state->pipe_bpp, max_bpp);
		return false;
	}

	/* Wa_16011303918:adl-p */
	if (crtc_state->vrr.enable &&
	    IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
		return false;
	}

	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
		return false;
	}

	if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, Unable to use long enough wake times\n");
		return false;
	}

	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
	if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
	    crtc_state->hw.adjusted_mode.crtc_vblank_start <
	    psr2_block_count_lines(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, too short vblank time\n");
		return false;
	}

	/* Selective fetch may substitute for HW tracking when available. */
	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
		    !HAS_PSR_HW_TRACKING(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
			return false;
		}
	}

	if (!psr2_granularity_check(intel_dp, crtc_state)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
		goto unsupported;
	}

	/* Resolution limits only apply to HW tracking, not selective fetch. */
	if (!crtc_state->enable_psr2_sel_fetch &&
	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			    crtc_hdisplay, crtc_vdisplay,
			    psr_max_h, psr_max_v);
		goto unsupported;
	}

	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
	return true;

unsupported:
	crtc_state->enable_psr2_sel_fetch = false;
	return false;
}
|
|
|
|
|
2023-11-08 07:23:00 +00:00
|
|
|
static bool _psr_compute_config(struct intel_dp *intel_dp,
|
|
|
|
struct intel_crtc_state *crtc_state)
|
2014-11-14 16:52:28 +00:00
|
|
|
{
|
2018-08-27 22:30:21 +00:00
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
2023-11-08 07:23:00 +00:00
|
|
|
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
|
2023-11-16 09:05:12 +00:00
|
|
|
int entry_setup_frames;
|
2014-11-14 16:52:28 +00:00
|
|
|
|
2021-01-22 23:26:36 +00:00
|
|
|
/*
|
2022-07-01 20:32:36 +00:00
|
|
|
* Current PSR panels don't work reliably with VRR enabled
|
2021-01-22 23:26:36 +00:00
|
|
|
* So if VRR is enabled, do not enable PSR.
|
|
|
|
*/
|
|
|
|
if (crtc_state->vrr.enable)
|
2023-11-08 07:23:00 +00:00
|
|
|
return false;
|
2021-01-22 23:26:36 +00:00
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
if (!CAN_PSR(intel_dp))
|
2023-11-08 07:23:00 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
|
|
|
|
|
|
|
|
if (entry_setup_frames >= 0) {
|
|
|
|
intel_dp->psr.entry_setup_frames = entry_setup_frames;
|
|
|
|
} else {
|
|
|
|
drm_dbg_kms(&dev_priv->drm,
|
|
|
|
"PSR condition failed: PSR setup timing not met\n");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Atomic-check entry point for PSR / Panel Replay: decides has_panel_replay,
 * has_psr and has_psr2 for the crtc state and prepares the PSR VSC SDP.
 * Bails out early (leaving PSR disabled) on any blocking condition.
 */
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state,
			      struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	if (!psr_global_enabled(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
		return;
	}

	/* Sink flagged unreliable after earlier PSR errors — keep PSR off. */
	if (intel_dp->psr.sink_not_reliable) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	/*
	 * FIXME figure out what is wrong with PSR+bigjoiner and
	 * fix it. Presumably something related to the fact that
	 * PSR is a transcoder level feature.
	 */
	if (crtc_state->bigjoiner_pipes) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR disabled due to bigjoiner\n");
		return;
	}

	/* Panel Replay takes precedence over PSR when the sink supports it. */
	if (CAN_PANEL_REPLAY(intel_dp))
		crtc_state->has_panel_replay = true;
	else
		crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);

	if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
		return;

	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
				     &crtc_state->psr_vsc);
}
|
|
|
|
|
2021-04-18 00:21:22 +00:00
|
|
|
/*
 * State readout: reflect the current PSR / Panel Replay SW+HW state into
 * pipe_config for the atomic state checker. PSR1/PSR2 enable state is taken
 * from SW tracking (see comment below); selective fetch and the DC3CO
 * exitline are read back from HW registers.
 */
void intel_psr_get_config(struct intel_encoder *encoder,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_dp *intel_dp;
	u32 val;

	if (!dig_port)
		return;

	intel_dp = &dig_port->dp;
	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
		return;

	mutex_lock(&intel_dp->psr.lock);
	if (!intel_dp->psr.enabled)
		goto unlock;

	if (intel_dp->psr.panel_replay_enabled) {
		pipe_config->has_panel_replay = true;
	} else {
		/*
		 * Not possible to read EDP_PSR/PSR2_CTL registers as it is
		 * enabled/disabled because of frontbuffer tracking and others.
		 */
		pipe_config->has_psr = true;
	}

	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);

	/* The remaining readout is PSR2-only. */
	if (!intel_dp->psr.psr2_enabled)
		goto unlock;

	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
		if (val & PSR2_MAN_TRK_CTL_ENABLE)
			pipe_config->enable_psr2_sel_fetch = true;
	}

	if (DISPLAY_VER(dev_priv) >= 12) {
		val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
	}
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}
|
|
|
|
|
2014-11-19 15:37:00 +00:00
|
|
|
/*
 * Arm the PSR/Panel Replay hardware on the currently configured transcoder.
 *
 * Caller must hold intel_dp->psr.lock (checked via lockdep below) and the
 * feature must currently be fully disabled in hardware: the drm_WARN_ONs
 * verify that neither PSR1 nor PSR2 is still enabled and that the software
 * state agrees (psr.active == false).
 */
static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	/* PSR2 must not already be armed on a PSR2-capable transcoder... */
	drm_WARN_ON(&dev_priv->drm,
		    transcoder_has_psr2(dev_priv, cpu_transcoder) &&
		    intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);

	/* ...nor PSR1 on the per-platform PSR control register. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);

	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);

	lockdep_assert_held(&intel_dp->psr.lock);

	/* psr1, psr2 and panel-replay are mutually exclusive.*/
	if (intel_dp->psr.panel_replay_enabled)
		dg2_activate_panel_replay(intel_dp);
	else if (intel_dp->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	intel_dp->psr.active = true;
}
|
|
|
|
|
2022-02-10 18:52:23 +00:00
|
|
|
static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
switch (intel_dp->psr.pipe) {
|
|
|
|
case PIPE_A:
|
|
|
|
return LATENCY_REPORTING_REMOVED_PIPE_A;
|
|
|
|
case PIPE_B:
|
|
|
|
return LATENCY_REPORTING_REMOVED_PIPE_B;
|
|
|
|
case PIPE_C:
|
|
|
|
return LATENCY_REPORTING_REMOVED_PIPE_C;
|
2023-01-05 06:56:37 +00:00
|
|
|
case PIPE_D:
|
|
|
|
return LATENCY_REPORTING_REMOVED_PIPE_D;
|
2022-02-10 18:52:23 +00:00
|
|
|
default:
|
|
|
|
MISSING_CASE(intel_dp->psr.pipe);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-03-29 15:06:59 +00:00
|
|
|
/*
|
|
|
|
* Wa_16013835468
|
2023-03-29 15:07:00 +00:00
|
|
|
* Wa_14015648006
|
2023-03-29 15:06:59 +00:00
|
|
|
*/
|
|
|
|
/*
 * Apply or clear the per-pipe LATENCY_REPORTING_REMOVED chicken bit in
 * GEN8_CHICKEN_DCPR_1 depending on whether either workaround condition
 * currently holds for this crtc state.
 */
static void wm_optimization_wa(struct intel_dp *intel_dp,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	bool set_wa_bit = false;

	/* Wa_14015648006 */
	if (IS_DISPLAY_VER(dev_priv, 11, 14))
		set_wa_bit |= crtc_state->wm_level_disabled;

	/* Wa_16013835468: needed when vblank start differs from vdisplay */
	if (DISPLAY_VER(dev_priv) == 12)
		set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
			      crtc_state->hw.adjusted_mode.crtc_vdisplay;

	if (set_wa_bit)
		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
			     0, wa_16013835468_bit_get(intel_dp));
	else
		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
			     wa_16013835468_bit_get(intel_dp), 0);
}
|
|
|
|
|
2022-02-10 18:52:23 +00:00
|
|
|
/*
 * Program the source (display engine) side of PSR: AUX setup on pre-SKL,
 * the PSR debug mask, PSR interrupts, the DC3CO exitline, selective-fetch
 * HW-tracking override and the various platform workarounds. The sink side
 * is configured separately by intel_psr_enable_sink().
 */
static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 mask;

	/*
	 * Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values PSR AUX transactions
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		hsw_psr_setup_aux(intel_dp);

	/*
	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also
	 * mask LPSP to avoid dependency on other drivers that might block
	 * runtime_pm besides preventing other hw tracking issues now we
	 * can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD;

	/*
	 * For some unknown reason on HSW non-ULT (or at least on
	 * Dell Latitude E6540) external displays start to flicker
	 * when PSR is enabled on the eDP. SR/PC6 residency is much
	 * higher than should be possible with an external display.
	 * As a workaround leave LPSP unmasked to prevent PSR entry
	 * when external displays are active.
	 */
	if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
		mask |= EDP_PSR_DEBUG_MASK_LPSP;

	if (DISPLAY_VER(dev_priv) < 20)
		mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	/*
	 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
	 * registers in order to keep the CURSURFLIVE tricks working :(
	 */
	if (IS_DISPLAY_VER(dev_priv, 9, 10))
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	/* allow PSR with sprite enabled */
	if (IS_HASWELL(dev_priv))
		mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;

	intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);

	psr_irq_control(intel_dp);

	/*
	 * TODO: if future platforms supports DC3CO in more than one
	 * transcoder, EXITLINE will need to be unset when disabling PSR
	 */
	if (intel_dp->psr.dc3co_exitline)
		intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);

	/* Override HW frontbuffer tracking when selective fetch is used. */
	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
			     intel_dp->psr.psr2_sel_fetch_enabled ?
			     IGNORE_PSR2_HW_TRACKING : 0);

	/*
	 * Wa_16013835468
	 * Wa_14015648006
	 */
	wm_optimization_wa(intel_dp, crtc_state);

	if (intel_dp->psr.psr2_enabled) {
		if (DISPLAY_VER(dev_priv) == 9)
			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
				     PSR2_VSC_ENABLE_PROG_HEADER |
				     PSR2_ADD_VERTICAL_LINE_COUNT);

		/*
		 * Wa_16014451276:adlp,mtl[a0,b0]
		 * All supported adlp panels have 1-based X granularity, this may
		 * cause issues if non-supported panels are used.
		 */
		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
		    IS_ALDERLAKE_P(dev_priv))
			intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
				     0, ADLP_1_BASED_X_GRANULARITY);

		/* Wa_16012604467:adlp,mtl[a0,b0] */
		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
			intel_de_rmw(dev_priv,
				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
		else if (IS_ALDERLAKE_P(dev_priv))
			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
	}
}
|
|
|
|
|
2021-05-19 00:06:20 +00:00
|
|
|
/*
 * Check whether a stale PSR error is latched in the PSR IIR register.
 *
 * Returns false (and marks the sink unreliable) when the error bit is set,
 * in which case PSR must not be enabled; returns true when it is safe to
 * proceed.
 */
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 val;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * And enabling in this situation cause the screen to freeze in the
	 * first time that PSR HW tries to activate so lets keep PSR disabled
	 * to avoid any rendering problems.
	 */
	val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
	val &= psr_irq_psr_error_bit_get(intel_dp);
	if (val) {
		intel_dp->psr.sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "PSR interruption error set, not enabling PSR\n");
		return false;
	}

	return true;
}
|
|
|
|
|
|
|
|
/*
 * Enable PSR/Panel Replay with intel_dp->psr.lock already held: latch the
 * crtc state into intel_dp->psr, bail out on a latched PSR IIR error, then
 * configure sink and source and finally arm the hardware.
 */
static void intel_psr_enable_locked(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
	struct intel_encoder *encoder = &dig_port->base;
	u32 val;

	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);

	/* Snapshot everything the PSR code needs from the crtc state. */
	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
	intel_dp->psr.busy_frontbuffer_bits = 0;
	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
	/* DC5/DC6 requires at least 6 idle frames */
	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
	intel_dp->psr.dc3co_exit_delay = val;
	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
	intel_dp->psr.req_psr2_sdp_prior_scanline =
		crtc_state->req_psr2_sdp_prior_scanline;

	if (!psr_interrupt_error_check(intel_dp))
		return;

	if (intel_dp->psr.panel_replay_enabled)
		drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
	else
		drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
			    intel_dp->psr.psr2_enabled ? "2" : "1");

	/* Sink first (VSC SDP + DPCD), then source, then arm. */
	intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
	intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	intel_dp->psr.enabled = true;
	intel_dp->psr.paused = false;

	intel_psr_activate(intel_dp);
}
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
/*
 * Disarm PSR/Panel Replay in hardware and clear psr.active.
 *
 * When psr.active is already false this only sanity-checks that the enable
 * bits are indeed clear. Otherwise it clears the mode-specific enable bit
 * and warns if the hardware did not have it set (state mismatch).
 */
static void intel_psr_exit(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 val;

	if (!intel_dp->psr.active) {
		if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
			val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
		}

		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);

		return;
	}

	if (intel_dp->psr.panel_replay_enabled) {
		intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
	} else if (intel_dp->psr.psr2_enabled) {
		tgl_disallow_dc3co_on_psr2_exit(intel_dp);

		/* intel_de_rmw() returns the pre-RMW value: warn if it was off. */
		val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
				   EDP_PSR2_ENABLE, 0);

		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
	} else {
		val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
				   EDP_PSR_ENABLE, 0);

		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
	}
	intel_dp->psr.active = false;
}
|
|
|
|
|
2021-06-08 08:54:14 +00:00
|
|
|
/*
 * Busy-wait (up to 2 seconds) for the PSR state machine to report idle
 * after intel_psr_exit(). Selects the PSR1 or PSR2 status register based
 * on which mode was enabled. Logs an error on timeout but does not fail.
 */
static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	i915_reg_t psr_status;
	u32 psr_status_mask;

	if (intel_dp->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS(cpu_transcoder);
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		psr_status = psr_status_reg(dev_priv, cpu_transcoder);
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Wait till PSR is idle */
	if (intel_de_wait_for_clear(dev_priv, psr_status,
				    psr_status_mask, 2000))
		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
}
|
|
|
|
|
|
|
|
/*
 * Fully disable PSR/Panel Replay with intel_dp->psr.lock held: disarm the
 * hardware, wait for idle, undo the enable-time workarounds, disable PSR
 * on the sink via DPCD, and reset the software state.
 */
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	enum phy phy = intel_port_to_phy(dev_priv,
					 dp_to_dig_port(intel_dp)->base.port);

	lockdep_assert_held(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		return;

	if (intel_dp->psr.panel_replay_enabled)
		drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
	else
		drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
			    intel_dp->psr.psr2_enabled ? "2" : "1");

	intel_psr_exit(intel_dp);
	intel_psr_wait_exit_locked(intel_dp);

	/*
	 * Wa_16013835468
	 * Wa_14015648006
	 */
	if (DISPLAY_VER(dev_priv) >= 11)
		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
			     wa_16013835468_bit_get(intel_dp), 0);

	if (intel_dp->psr.psr2_enabled) {
		/* Wa_16012604467:adlp,mtl[a0,b0] */
		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
			intel_de_rmw(dev_priv,
				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
		else if (IS_ALDERLAKE_P(dev_priv))
			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
	}

	intel_snps_phy_update_psr_power_state(dev_priv, phy, false);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	if (intel_dp->psr.psr2_enabled)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);

	intel_dp->psr.enabled = false;
	intel_dp->psr.panel_replay_enabled = false;
	intel_dp->psr.psr2_enabled = false;
	intel_dp->psr.psr2_sel_fetch_enabled = false;
	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
}
|
|
|
|
|
2014-11-19 15:37:00 +00:00
|
|
|
/**
|
|
|
|
* intel_psr_disable - Disable PSR
|
|
|
|
* @intel_dp: Intel DP
|
2017-08-18 13:49:56 +00:00
|
|
|
* @old_crtc_state: old CRTC state
|
2014-11-19 15:37:00 +00:00
|
|
|
*
|
|
|
|
* This function needs to be called before disabling pipe.
|
|
|
|
*/
|
2017-08-18 13:49:56 +00:00
|
|
|
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
		return;

	mutex_lock(&intel_dp->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);
	/* Flush deferred work after dropping the lock to avoid deadlock. */
	cancel_work_sync(&intel_dp->psr.work);
	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
}
|
|
|
|
|
2021-06-08 08:54:14 +00:00
|
|
|
/**
|
|
|
|
* intel_psr_pause - Pause PSR
|
|
|
|
* @intel_dp: Intel DP
|
|
|
|
*
|
|
|
|
* This function need to be called after enabling psr.
|
|
|
|
*/
|
|
|
|
void intel_psr_pause(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled) {
		mutex_unlock(&psr->lock);
		return;
	}

	/* If we ever hit this, we will need to add refcount to pause/resume */
	drm_WARN_ON(&dev_priv->drm, psr->paused);

	intel_psr_exit(intel_dp);
	intel_psr_wait_exit_locked(intel_dp);
	psr->paused = true;

	mutex_unlock(&psr->lock);

	/* Flush deferred PSR work outside the lock. */
	cancel_work_sync(&psr->work);
	cancel_delayed_work_sync(&psr->dc3co_work);
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* intel_psr_resume - Resume PSR
|
|
|
|
* @intel_dp: Intel DP
|
|
|
|
*
|
|
|
|
* This function need to be called after pausing psr.
|
|
|
|
*/
|
|
|
|
void intel_psr_resume(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	/* Only re-arm if a prior intel_psr_pause() left us paused. */
	if (!psr->paused)
		goto unlock;

	psr->paused = false;
	intel_psr_activate(intel_dp);

unlock:
	mutex_unlock(&psr->lock);
}
|
|
|
|
|
2022-04-05 15:53:42 +00:00
|
|
|
static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
|
|
|
|
{
|
2022-11-01 11:53:42 +00:00
|
|
|
return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
|
|
|
|
PSR2_MAN_TRK_CTL_ENABLE;
|
2022-04-05 15:53:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
|
2021-09-30 00:14:04 +00:00
|
|
|
{
|
2022-09-07 08:15:43 +00:00
|
|
|
return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
|
2021-09-30 00:14:04 +00:00
|
|
|
ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
|
|
|
|
PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
|
|
|
|
}
|
|
|
|
|
2022-04-05 15:53:42 +00:00
|
|
|
static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
|
2022-02-25 07:02:28 +00:00
|
|
|
{
|
2022-11-01 11:53:42 +00:00
|
|
|
return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
|
2022-02-25 07:02:28 +00:00
|
|
|
ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
|
|
|
|
PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
|
|
|
|
}
|
|
|
|
|
2022-04-05 15:53:44 +00:00
|
|
|
static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
|
|
|
|
{
|
2022-11-01 11:53:42 +00:00
|
|
|
return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
|
2022-04-05 15:53:44 +00:00
|
|
|
ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
|
|
|
|
PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
|
|
|
|
}
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
/*
 * Force the PSR hardware tracking logic to exit PSR without disabling it:
 * program a single+continuous full-frame manual-tracking update when
 * selective fetch is enabled, then poke CURSURFLIVE (Display WA #0884).
 */
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	if (intel_dp->psr.psr2_sel_fetch_enabled)
		intel_de_write(dev_priv,
			       PSR2_MAN_TRK_CTL(cpu_transcoder),
			       man_trk_ctl_enable_bit_get(dev_priv) |
			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
			       man_trk_ctl_single_full_frame_bit_get(dev_priv) |
			       man_trk_ctl_continuos_full_frame(dev_priv));

	/*
	 * Display WA #0884: skl+
	 * This documented WA for bxt can be safely applied
	 * broadly so we can force HW tracking to exit PSR
	 * instead of disabling and re-enabling.
	 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
	 * but it makes more sense write to the current active
	 * pipe.
	 *
	 * This workaround do not exist for platforms with display 10 or newer
	 * but testing proved that it works for up display 13, for newer
	 * than that testing will be needed.
	 */
	intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
}
|
|
|
|
|
2020-08-10 17:41:43 +00:00
|
|
|
/*
 * Program the precomputed manual tracking value (crtc_state->psr2_man_track_ctl,
 * see psr2_man_trk_ctl_calc()) into the transcoder's PSR2_MAN_TRK_CTL register.
 * Skipped entirely while continuous-full-frame (CFF) mode owns the register.
 */
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	struct intel_encoder *encoder;

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	/* Only the first PSR-capable encoder is checked (note the break). */
	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		lockdep_assert_held(&intel_dp->psr.lock);
		/* CFF mode is active: leave the register as-is. */
		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
			return;
		break;
	}

	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
		       crtc_state->psr2_man_track_ctl);
}
|
|
|
|
|
2020-10-07 19:52:38 +00:00
|
|
|
/*
 * Compute the PSR2_MAN_TRK_CTL value describing the selective-update region
 * and store it in crtc_state->psr2_man_track_ctl; it is written to hardware
 * later by intel_psr2_program_trans_man_trk_ctl().
 *
 * @clip: damaged area in pipe coordinates; clip->y1 == -1 means "no area".
 * @full_update: request a single full frame instead of a partial region.
 */
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
				  struct drm_rect *clip, bool full_update)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = man_trk_ctl_enable_bit_get(dev_priv);

	/* SF partial frame enable has to be set even on full update */
	val |= man_trk_ctl_partial_frame_bit_get(dev_priv);

	if (full_update) {
		/* Single full frame + continuous full frame bits. */
		val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
		val |= man_trk_ctl_continuos_full_frame(dev_priv);
		goto exit;
	}

	/* No damaged area: program only the enable/partial-frame bits. */
	if (clip->y1 == -1)
		goto exit;

	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
		/* ADL-P+ programs the region in scanlines, end inclusive. */
		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
	} else {
		/* Older platforms use 4-line blocks, 1-based addressing. */
		drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);

		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
	}
exit:
	crtc_state->psr2_man_track_ctl = val;
}
|
|
|
|
|
|
|
|
static void clip_area_update(struct drm_rect *overlap_damage_area,
|
2022-05-13 14:28:11 +00:00
|
|
|
struct drm_rect *damage_area,
|
|
|
|
struct drm_rect *pipe_src)
|
2020-10-07 19:52:38 +00:00
|
|
|
{
|
2022-05-13 14:28:11 +00:00
|
|
|
if (!drm_rect_intersect(damage_area, pipe_src))
|
|
|
|
return;
|
|
|
|
|
2020-10-07 19:52:38 +00:00
|
|
|
if (overlap_damage_area->y1 == -1) {
|
|
|
|
overlap_damage_area->y1 = damage_area->y1;
|
|
|
|
overlap_damage_area->y2 = damage_area->y2;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (damage_area->y1 < overlap_damage_area->y1)
|
|
|
|
overlap_damage_area->y1 = damage_area->y1;
|
|
|
|
|
|
|
|
if (damage_area->y2 > overlap_damage_area->y2)
|
|
|
|
overlap_damage_area->y2 = damage_area->y2;
|
|
|
|
}
|
|
|
|
|
2021-06-16 20:31:53 +00:00
|
|
|
static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
|
|
|
|
struct drm_rect *pipe_clip)
|
|
|
|
{
|
2021-06-25 23:55:59 +00:00
|
|
|
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
|
2022-09-07 08:15:43 +00:00
|
|
|
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
|
|
|
|
u16 y_alignment;
|
|
|
|
|
|
|
|
/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
|
|
|
|
if (crtc_state->dsc.compression_enable &&
|
|
|
|
(IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
|
|
|
|
y_alignment = vdsc_cfg->slice_height;
|
|
|
|
else
|
|
|
|
y_alignment = crtc_state->su_y_granularity;
|
2021-06-16 20:31:53 +00:00
|
|
|
|
|
|
|
pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
|
|
|
|
if (pipe_clip->y2 % y_alignment)
|
|
|
|
pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
|
|
|
|
}
|
|
|
|
|
2021-09-30 00:14:01 +00:00
|
|
|
/*
|
|
|
|
* TODO: Not clear how to handle planes with negative position,
|
|
|
|
* also planes are not updated if they have a negative X
|
|
|
|
* position so for now doing a full update in this cases
|
|
|
|
*
|
|
|
|
* Plane scaling and rotation is not supported by selective fetch and both
|
|
|
|
* properties can change without a modeset, so need to be check at every
|
2022-07-01 20:32:36 +00:00
|
|
|
* atomic commit.
|
2021-09-30 00:14:01 +00:00
|
|
|
*/
|
|
|
|
static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
|
|
|
|
{
|
|
|
|
if (plane_state->uapi.dst.y1 < 0 ||
|
|
|
|
plane_state->uapi.dst.x1 < 0 ||
|
|
|
|
plane_state->scaler_id >= 0 ||
|
|
|
|
plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check for pipe properties that is not supported by selective fetch.
|
|
|
|
*
|
|
|
|
* TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
|
|
|
|
* after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
|
|
|
|
* enabled and going to the full update path.
|
|
|
|
*/
|
|
|
|
static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
|
|
|
|
{
|
|
|
|
if (crtc_state->scaler_state.scaler_id >= 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-10-07 19:52:38 +00:00
|
|
|
/**
 * intel_psr2_sel_fetch_update - compute PSR2 selective fetch state for a CRTC
 * @state: atomic state
 * @crtc: CRTC whose selective fetch areas are computed
 *
 * Pass 1 accumulates the damaged areas of all planes on @crtc into a single
 * vertical pipe clip; the clip is then aligned to hardware requirements and
 * pass 2 programs each visible plane's selective fetch area relative to its
 * destination.  Falls back to a single full-frame update whenever the area
 * cannot be computed or an unsupported plane/pipe feature is in use.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	/* y1 == -1 marks "no damage accumulated yet" (see clip_area_update()) */
	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
	struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_plane *plane;
	bool full_update = false;
	int i, ret;

	if (!crtc_state->enable_psr2_sel_fetch)
		return 0;

	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
		full_update = true;
		goto skip_sel_fetch_set_loop;
	}

	/*
	 * Calculate minimal selective fetch area of each plane and calculate
	 * the pipe damaged area.
	 * In the next loop the plane selective fetch area will actually be set
	 * using whole pipe damaged area.
	 */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
						      .x2 = INT_MAX };

		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
			continue;

		if (!new_plane_state->uapi.visible &&
		    !old_plane_state->uapi.visible)
			continue;

		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
			full_update = true;
			break;
		}

		/*
		 * If visibility or plane moved, mark the whole plane area as
		 * damaged as it needs to be complete redraw in the new and old
		 * position.
		 */
		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
		    !drm_rect_equals(&new_plane_state->uapi.dst,
				     &old_plane_state->uapi.dst)) {
			if (old_plane_state->uapi.visible) {
				damaged_area.y1 = old_plane_state->uapi.dst.y1;
				damaged_area.y2 = old_plane_state->uapi.dst.y2;
				clip_area_update(&pipe_clip, &damaged_area,
						 &crtc_state->pipe_src);
			}

			if (new_plane_state->uapi.visible) {
				damaged_area.y1 = new_plane_state->uapi.dst.y1;
				damaged_area.y2 = new_plane_state->uapi.dst.y2;
				clip_area_update(&pipe_clip, &damaged_area,
						 &crtc_state->pipe_src);
			}
			continue;
		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
			/* If alpha changed mark the whole plane area as damaged */
			damaged_area.y1 = new_plane_state->uapi.dst.y1;
			damaged_area.y2 = new_plane_state->uapi.dst.y2;
			clip_area_update(&pipe_clip, &damaged_area,
					 &crtc_state->pipe_src);
			continue;
		}

		src = drm_plane_state_src(&new_plane_state->uapi);
		drm_rect_fp_to_int(&src, &src);

		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
						     &new_plane_state->uapi, &damaged_area))
			continue;

		/* Translate damage from plane-source to pipe coordinates. */
		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;

		clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
	}

	/*
	 * TODO: For now we are just using full update in case
	 * selective fetch area calculation fails. To optimize this we
	 * should identify cases where this happens and fix the area
	 * calculation for those.
	 */
	if (pipe_clip.y1 == -1) {
		drm_info_once(&dev_priv->drm,
			      "Selective fetch area calculation failed in pipe %c\n",
			      pipe_name(crtc->pipe));
		full_update = true;
	}

	if (full_update)
		goto skip_sel_fetch_set_loop;

	/* Wa_14014971492 */
	if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
	     IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
	    crtc_state->splitter.enable)
		pipe_clip.y1 = 0;

	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
	if (ret)
		return ret;

	intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);

	/*
	 * Now that we have the pipe damaged area check if it intersect with
	 * every plane, if it does set the plane selective fetch area.
	 */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		struct drm_rect *sel_fetch_area, inter;
		struct intel_plane *linked = new_plane_state->planar_linked_plane;

		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
		    !new_plane_state->uapi.visible)
			continue;

		inter = pipe_clip;
		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
			sel_fetch_area->y1 = -1;
			sel_fetch_area->y2 = -1;
			/*
			 * if plane sel fetch was previously enabled ->
			 * disable it
			 */
			if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
				crtc_state->update_planes |= BIT(plane->id);

			continue;
		}

		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
			full_update = true;
			break;
		}

		/* Store the area relative to the plane's own destination. */
		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
		crtc_state->update_planes |= BIT(plane->id);

		/*
		 * Sel_fetch_area is calculated for UV plane. Use
		 * same area for Y plane as well.
		 */
		if (linked) {
			struct intel_plane_state *linked_new_plane_state;
			struct drm_rect *linked_sel_fetch_area;

			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_new_plane_state))
				return PTR_ERR(linked_new_plane_state);

			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
			crtc_state->update_planes |= BIT(linked->id);
		}
	}

skip_sel_fetch_set_loop:
	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
	return 0;
}
|
|
|
|
|
2021-10-22 10:32:56 +00:00
|
|
|
/*
 * Called before the plane-update phase of an atomic commit: for every PSR
 * encoder on the old CRTC state, disable PSR when the new state can no
 * longer sustain it, or apply the Wa_14015648006 workaround otherwise.
 */
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_encoder *encoder;

	if (!HAS_PSR(i915))
		return;

	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
					     old_crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		struct intel_psr *psr = &intel_dp->psr;
		bool needs_to_disable = false;

		mutex_lock(&psr->lock);

		/*
		 * Reasons to disable:
		 * - PSR disabled in new state
		 * - All planes will go inactive
		 * - Changing between PSR versions
		 * - Display WA #1136: skl, bxt
		 */
		needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
		needs_to_disable |= !new_crtc_state->has_psr;
		needs_to_disable |= !new_crtc_state->active_planes;
		needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
		needs_to_disable |= DISPLAY_VER(i915) < 11 &&
			new_crtc_state->wm_level_disabled;

		if (psr->enabled && needs_to_disable)
			intel_psr_disable_locked(intel_dp);
		else if (psr->enabled && new_crtc_state->wm_level_disabled)
			/* Wa_14015648006 */
			wm_optimization_wa(intel_dp, new_crtc_state);

		mutex_unlock(&psr->lock);
	}
}
|
|
|
|
|
2023-10-04 15:55:56 +00:00
|
|
|
/*
 * Called after the plane-update phase of an atomic commit: re-enable PSR
 * on the new CRTC state when safe, undo the Wa_14015648006 workaround,
 * force a HW tracking exit while CRC capture is on, and clear any stale
 * frontbuffer busy bits.
 */
void intel_psr_post_plane_update(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_encoder *encoder;

	if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
		return;

	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		struct intel_psr *psr = &intel_dp->psr;
		bool keep_disabled = false;

		mutex_lock(&psr->lock);

		/* pre_plane_update should have disabled PSR in this case. */
		drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);

		keep_disabled |= psr->sink_not_reliable;
		keep_disabled |= !crtc_state->active_planes;

		/* Display WA #1136: skl, bxt */
		keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
			crtc_state->wm_level_disabled;

		if (!psr->enabled && !keep_disabled)
			intel_psr_enable_locked(intel_dp, crtc_state);
		else if (psr->enabled && !crtc_state->wm_level_disabled)
			/* Wa_14015648006 */
			wm_optimization_wa(intel_dp, crtc_state);

		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			psr_force_hw_tracking_exit(intel_dp);

		/*
		 * Clear possible busy bits in case we have
		 * invalidate -> flip -> flush sequence.
		 */
		intel_dp->psr.busy_frontbuffer_bits = 0;

		mutex_unlock(&psr->lock);
	}
}
|
2019-02-06 21:18:45 +00:00
|
|
|
|
2021-10-05 23:18:51 +00:00
|
|
|
static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
2023-04-11 19:14:29 +00:00
|
|
|
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
|
2021-10-05 23:18:51 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
|
|
|
|
* As all higher states has bit 4 of PSR2 state set we can just wait for
|
|
|
|
* EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
|
|
|
|
*/
|
|
|
|
return intel_de_wait_for_clear(dev_priv,
|
2023-04-11 19:14:29 +00:00
|
|
|
EDP_PSR2_STATUS(cpu_transcoder),
|
2021-10-05 23:18:51 +00:00
|
|
|
EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
|
2018-06-27 20:02:49 +00:00
|
|
|
{
|
2021-02-04 13:40:14 +00:00
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
2023-04-11 19:14:29 +00:00
|
|
|
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
|
2018-06-27 20:02:49 +00:00
|
|
|
|
|
|
|
/*
|
2018-08-24 23:08:44 +00:00
|
|
|
* From bspec: Panel Self Refresh (BDW+)
|
|
|
|
* Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
|
|
|
|
* exit training time + 1.5 ms of aux channel handshake. 50 ms is
|
|
|
|
* defensive enough to cover everything.
|
2018-06-27 20:02:49 +00:00
|
|
|
*/
|
2021-10-05 23:18:51 +00:00
|
|
|
return intel_de_wait_for_clear(dev_priv,
|
2023-06-09 14:13:54 +00:00
|
|
|
psr_status_reg(dev_priv, cpu_transcoder),
|
2021-10-05 23:18:51 +00:00
|
|
|
EDP_PSR_STATUS_STATE_MASK, 50);
|
2018-06-27 20:02:49 +00:00
|
|
|
}
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
/**
 * intel_psr_wait_for_idle_locked - wait for PSR be ready for a pipe update
 * @new_crtc_state: new CRTC state
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.  Each encoder's psr.lock
 * must already be held (see the lockdep assertion below).
 */
void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!new_crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
					     new_crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		int ret;

		lockdep_assert_held(&intel_dp->psr.lock);

		if (!intel_dp->psr.enabled)
			continue;

		/* PSR1 and PSR2 idle through different status registers. */
		if (intel_dp->psr.psr2_enabled)
			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
		else
			ret = _psr1_ready_for_pipe_update_locked(intel_dp);

		/* Timeout is non-fatal: warn and let the update proceed. */
		if (ret)
			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
	}
}
|
|
|
|
|
|
|
|
/*
 * Wait (with psr.lock temporarily dropped) for the PSR state machine to go
 * idle.  Returns true when the wait succeeded AND PSR is still enabled after
 * the lock is retaken; callers must re-validate state on a false return.
 */
static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!intel_dp->psr.enabled)
		return false;

	/* Pick the status register/mask for the active PSR version. */
	if (intel_dp->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS(cpu_transcoder);
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = psr_status_reg(dev_priv, cpu_transcoder);
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Drop the lock: the register poll may take up to 50 ms. */
	mutex_unlock(&intel_dp->psr.lock);

	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
	if (err)
		drm_err(&dev_priv->drm,
			"Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&intel_dp->psr.lock);
	return err == 0 && intel_dp->psr.enabled;
}
|
2014-11-14 16:52:28 +00:00
|
|
|
|
2019-02-06 21:18:45 +00:00
|
|
|
/*
 * Force a commit on every connected eDP connector so that a PSR debug-mode
 * change takes effect: marks each eDP CRTC's mode as changed and commits the
 * resulting atomic state, retrying from scratch on -EDEADLK.
 * Returns 0 on success or a negative error code.
 */
static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	int err = 0;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	state->acquire_ctx = &ctx;
	to_intel_atomic_state(state)->internal = true;

retry:
	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct drm_crtc_state *crtc_state;

		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			break;
		}

		/* eDP connector without an active CRTC: nothing to force. */
		if (!conn_state->crtc)
			continue;

		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			break;
		}

		/* Mark mode as changed to trigger a pipe->update() */
		crtc_state->mode_changed = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (err == 0)
		err = drm_atomic_commit(state);

	/* Deadlock: back off, clear the state and start over. */
	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
/*
 * Set the PSR debug mask (debugfs interface).
 *
 * @val may combine I915_PSR_DEBUG_IRQ with a mode from
 * I915_PSR_DEBUG_MODE_MASK; anything else is rejected with -EINVAL.
 * A mode change triggers intel_psr_fastset_force() so it takes effect
 * immediately.  Returns 0 on success or a negative error code.
 */
int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
	if (ret)
		return ret;

	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	intel_dp->psr.debug = val;

	/*
	 * Do it right away if it's already enabled, otherwise it will be done
	 * when enabling the source.
	 */
	if (intel_dp->psr.enabled)
		psr_irq_control(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);

	/* Mode changes only take effect after a forced fastset. */
	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
static void intel_psr_handle_irq(struct intel_dp *intel_dp)
|
2018-11-21 22:54:39 +00:00
|
|
|
{
|
2021-02-04 13:40:14 +00:00
|
|
|
struct intel_psr *psr = &intel_dp->psr;
|
2018-11-21 22:54:39 +00:00
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
intel_psr_disable_locked(intel_dp);
|
2018-11-21 22:54:39 +00:00
|
|
|
psr->sink_not_reliable = true;
|
|
|
|
/* let's make sure that sink is awaken */
|
2021-02-04 13:40:14 +00:00
|
|
|
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
|
2018-11-21 22:54:39 +00:00
|
|
|
}
|
|
|
|
|
2018-04-05 11:49:15 +00:00
|
|
|
/*
 * Deferred work that re-activates PSR after it was exited (and handles a
 * pending AUX-error IRQ first).  Bails out if PSR was disabled meanwhile,
 * the hardware is not yet idle, or frontbuffer activity raced with us.
 */
static void intel_psr_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.work);

	mutex_lock(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		goto unlock;

	if (READ_ONCE(intel_dp->psr.irq_aux_error))
		intel_psr_handle_irq(intel_dp);

	/*
	 * We have to make sure PSR is ready for re-enable
	 * otherwise it keeps disabled until next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(intel_dp))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}
|
|
|
|
|
2022-04-05 15:53:44 +00:00
|
|
|
static void _psr_invalidate_handle(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
2023-04-11 19:14:29 +00:00
|
|
|
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
|
2022-04-05 15:53:44 +00:00
|
|
|
|
|
|
|
if (intel_dp->psr.psr2_sel_fetch_enabled) {
|
|
|
|
u32 val;
|
|
|
|
|
2022-10-24 05:46:49 +00:00
|
|
|
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
|
|
|
|
/* Send one update otherwise lag is observed in screen */
|
|
|
|
intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
|
2022-04-05 15:53:44 +00:00
|
|
|
return;
|
2022-10-24 05:46:49 +00:00
|
|
|
}
|
2022-04-05 15:53:44 +00:00
|
|
|
|
|
|
|
val = man_trk_ctl_enable_bit_get(dev_priv) |
|
|
|
|
man_trk_ctl_partial_frame_bit_get(dev_priv) |
|
|
|
|
man_trk_ctl_continuos_full_frame(dev_priv);
|
2023-04-11 19:14:29 +00:00
|
|
|
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
|
2022-04-05 15:53:44 +00:00
|
|
|
intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
|
|
|
|
intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
|
|
|
|
} else {
|
|
|
|
intel_psr_exit(intel_dp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-14 16:52:29 +00:00
|
|
|
/**
|
2022-07-01 20:32:36 +00:00
|
|
|
* intel_psr_invalidate - Invalidate PSR
|
2016-08-04 15:32:38 +00:00
|
|
|
* @dev_priv: i915 device
|
2014-11-14 16:52:29 +00:00
|
|
|
* @frontbuffer_bits: frontbuffer plane tracking bits
|
2018-03-07 03:34:20 +00:00
|
|
|
* @origin: which operation caused the invalidate
|
2014-11-14 16:52:29 +00:00
|
|
|
*
|
|
|
|
* Since the hardware frontbuffer tracking has gaps we need to integrate
|
|
|
|
* with the software frontbuffer tracking. This function gets called every
|
|
|
|
* time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
|
|
|
|
* disabled if the frontbuffer mask contains a buffer relevant to PSR.
|
|
|
|
*
|
|
|
|
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
|
|
|
|
*/
|
2016-08-04 15:32:38 +00:00
|
|
|
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
|
2018-03-07 03:34:20 +00:00
|
|
|
unsigned frontbuffer_bits, enum fb_op_origin origin)
|
2014-11-14 16:52:28 +00:00
|
|
|
{
|
2021-02-04 13:40:14 +00:00
|
|
|
struct intel_encoder *encoder;
|
2017-09-07 23:00:31 +00:00
|
|
|
|
2018-05-11 23:00:59 +00:00
|
|
|
if (origin == ORIGIN_FLIP)
|
2018-03-07 03:34:20 +00:00
|
|
|
return;
|
|
|
|
|
2021-02-09 18:14:36 +00:00
|
|
|
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
|
2021-02-04 13:40:14 +00:00
|
|
|
unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
|
|
|
|
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
2014-11-14 16:52:28 +00:00
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
mutex_lock(&intel_dp->psr.lock);
|
|
|
|
if (!intel_dp->psr.enabled) {
|
|
|
|
mutex_unlock(&intel_dp->psr.lock);
|
|
|
|
continue;
|
|
|
|
}
|
2015-06-18 08:30:26 +00:00
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
pipe_frontbuffer_bits &=
|
|
|
|
INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
|
|
|
|
intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
|
2015-06-18 08:30:26 +00:00
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
if (pipe_frontbuffer_bits)
|
2022-04-05 15:53:44 +00:00
|
|
|
_psr_invalidate_handle(intel_dp);
|
2014-11-14 16:52:28 +00:00
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
mutex_unlock(&intel_dp->psr.lock);
|
|
|
|
}
|
|
|
|
}
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 save more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panels' deep sleep entry requirement (typically 4 frames).
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will trigger DC5/6 once PSR2 enters deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of the functional code to intel_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
/*
|
|
|
|
* When we will be completely rely on PSR2 S/W tracking in future,
|
|
|
|
* intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
|
2021-09-30 00:14:05 +00:00
|
|
|
* event also therefore tgl_dc3co_flush_locked() require to be changed
|
2020-02-05 21:49:45 +00:00
|
|
|
* accordingly in future.
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
*/
|
|
|
|
static void
|
2021-09-30 00:14:05 +00:00
|
|
|
tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
|
|
|
|
enum fb_op_origin origin)
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
{
|
2023-06-08 13:35:45 +00:00
|
|
|
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
|
|
|
|
2021-09-30 00:14:05 +00:00
|
|
|
if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
|
|
|
|
!intel_dp->psr.active)
|
|
|
|
return;
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* At every frontbuffer flush flip event modified delay of delayed work,
|
|
|
|
* when delayed work schedules that means display has been idle.
|
|
|
|
*/
|
|
|
|
if (!(frontbuffer_bits &
|
2021-02-04 13:40:14 +00:00
|
|
|
INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
|
2021-09-30 00:14:05 +00:00
|
|
|
return;
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
tgl_psr2_enable_dc3co(intel_dp);
|
2023-06-08 13:35:45 +00:00
|
|
|
mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
|
2021-02-04 13:40:14 +00:00
|
|
|
intel_dp->psr.dc3co_exit_delay);
|
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is useful power state, when DMC detects PSR2 idle frame
while an active video playback, playing 30fps video on 60hz panel
is the classic example of this use case.
B.Specs:49196 has a restriction to enable DC3CO only for Video Playback.
It will be worthy to enable DC3CO after completion of each pageflip
and switch back to DC5 when display is idle because driver doesn't
differentiate between video playback and a normal pageflip.
We will use Frontbuffer flush call tgl_dc3co_flush() to enable DC3CO
state only for ORIGIN_FLIP flush call, because DC3CO state has primarily
targeted for VPB use case. We are not interested here for frontbuffer
invalidates calls because that triggers PSR2 exit, which will
explicitly disable DC3CO.
DC5 and DC6 saves more power, but can't be entered during video
playback because there are not enough idle frames in a row to meet
most PSR2 panel deep sleep entry requirement typically 4 frames.
As PSR2 existing implementation is using minimum 6 idle frames for
deep sleep, it is safer to enable DC5/6 after 6 idle frames
(By scheduling a delayed work of 6 idle frames, once DC3CO has been
enabled after a pageflip).
After manually waiting for 6 idle frames DC5/6 will be enabled and
PSR2 deep sleep idle frames will be restored to 6 idle frames, at this
point DMC will triggers DC5/6 once PSR2 enters to deep sleep after
6 idle frames.
In future when we will enable S/W PSR2 tracking, we can change the
PSR2 required deep sleep idle frames to 1 so DMC can trigger the
DC5/6 immediately after S/W manual waiting of 6 idle frames get
complete.
v2: calculated s/w state to switch over dc3co when there is an
update. [Imre]
Used cancel_delayed_work_sync() in order to avoid any race
with already scheduled delayed work. [Imre]
v3: Cancel_delayed_work_sync() may blocked the commit work.
hence dropping it, dc5_idle_thread() checks the valid wakeref before
putting the reference count, which avoids any chances of dropping
a zero wakeref. [Imre (IRC)]
v4: Used frontbuffer flush mechanism. [Imre]
v5: Used psr.pipe to extract frontbuffer busy bits. [Imre]
Used cancel_delayed_work_sync() in encoder disable path. [Imre]
Used mod_delayed_work() instead of cancelling and scheduling a
delayed work. [Imre]
Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep
sleep. [Imre]
Removed DC5_REQ_IDLE_FRAMES macro. [Imre]
v6: Used dc3co_exitline check instead of TGL and dc3co allowed_dc_mask
checks, used delayed_work_pending with the psr lock and removed the
psr2_deep_slp_disabled flag. [Imre]
v7: Code refactoring, moved most of functional code to inte_psr.c [Imre]
Using frontbuffer_bits on psr.pipe check instead of
busy_frontbuffer_bits. [Imre]
Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre]
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Animesh Manna <animesh.manna@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
2019-10-03 08:17:37 +00:00
|
|
|
}
|
|
|
|
|
2022-04-05 15:53:44 +00:00
|
|
|
static void _psr_flush_handle(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
2023-04-11 19:14:29 +00:00
|
|
|
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
|
2022-04-05 15:53:44 +00:00
|
|
|
|
|
|
|
if (intel_dp->psr.psr2_sel_fetch_enabled) {
|
|
|
|
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
|
|
|
|
/* can we turn CFF off? */
|
|
|
|
if (intel_dp->psr.busy_frontbuffer_bits == 0) {
|
|
|
|
u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
|
2022-12-01 07:23:08 +00:00
|
|
|
man_trk_ctl_partial_frame_bit_get(dev_priv) |
|
|
|
|
man_trk_ctl_single_full_frame_bit_get(dev_priv) |
|
|
|
|
man_trk_ctl_continuos_full_frame(dev_priv);
|
2022-04-05 15:53:44 +00:00
|
|
|
|
|
|
|
/*
|
2022-12-01 07:23:08 +00:00
|
|
|
* Set psr2_sel_fetch_cff_enabled as false to allow selective
|
|
|
|
* updates. Still keep cff bit enabled as we don't have proper
|
|
|
|
* SU configuration in case update is sent for any reason after
|
|
|
|
* sff bit gets cleared by the HW on next vblank.
|
2022-04-05 15:53:44 +00:00
|
|
|
*/
|
2023-04-11 19:14:29 +00:00
|
|
|
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
|
2022-04-05 15:53:44 +00:00
|
|
|
val);
|
|
|
|
intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
|
|
|
|
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* continuous full frame is disabled, only a single full
|
|
|
|
* frame is required
|
|
|
|
*/
|
|
|
|
psr_force_hw_tracking_exit(intel_dp);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
psr_force_hw_tracking_exit(intel_dp);
|
|
|
|
|
|
|
|
if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
|
2023-06-08 13:35:45 +00:00
|
|
|
queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
|
2022-04-05 15:53:44 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-14 16:52:29 +00:00
|
|
|
/**
|
|
|
|
* intel_psr_flush - Flush PSR
|
2016-08-04 15:32:38 +00:00
|
|
|
* @dev_priv: i915 device
|
2014-11-14 16:52:29 +00:00
|
|
|
* @frontbuffer_bits: frontbuffer plane tracking bits
|
2015-07-08 23:21:31 +00:00
|
|
|
* @origin: which operation caused the flush
|
2014-11-14 16:52:29 +00:00
|
|
|
*
|
|
|
|
* Since the hardware frontbuffer tracking has gaps we need to integrate
|
|
|
|
* with the software frontbuffer tracking. This function gets called every
|
|
|
|
* time frontbuffer rendering has completed and flushed out to memory. PSR
|
|
|
|
* can be enabled again if no other frontbuffer relevant to PSR is dirty.
|
|
|
|
*
|
|
|
|
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
|
|
|
|
*/
|
2016-08-04 15:32:38 +00:00
|
|
|
void intel_psr_flush(struct drm_i915_private *dev_priv,
|
2015-07-08 23:21:31 +00:00
|
|
|
unsigned frontbuffer_bits, enum fb_op_origin origin)
|
2014-11-14 16:52:28 +00:00
|
|
|
{
|
2021-02-04 13:40:14 +00:00
|
|
|
struct intel_encoder *encoder;
|
2017-09-07 23:00:31 +00:00
|
|
|
|
2021-02-09 18:14:36 +00:00
|
|
|
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
|
2021-02-04 13:40:14 +00:00
|
|
|
unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
|
|
|
|
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
2018-03-07 03:34:20 +00:00
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
mutex_lock(&intel_dp->psr.lock);
|
|
|
|
if (!intel_dp->psr.enabled) {
|
|
|
|
mutex_unlock(&intel_dp->psr.lock);
|
|
|
|
continue;
|
|
|
|
}
|
2014-11-14 16:52:28 +00:00
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
pipe_frontbuffer_bits &=
|
|
|
|
INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
|
|
|
|
intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
|
2014-11-14 16:52:28 +00:00
|
|
|
|
2021-06-08 08:54:14 +00:00
|
|
|
/*
|
|
|
|
* If the PSR is paused by an explicit intel_psr_paused() call,
|
|
|
|
* we have to ensure that the PSR is not activated until
|
|
|
|
* intel_psr_resume() is called.
|
|
|
|
*/
|
2022-04-05 15:53:44 +00:00
|
|
|
if (intel_dp->psr.paused)
|
|
|
|
goto unlock;
|
2021-06-08 08:54:14 +00:00
|
|
|
|
2021-09-30 00:14:05 +00:00
|
|
|
if (origin == ORIGIN_FLIP ||
|
|
|
|
(origin == ORIGIN_CURSOR_UPDATE &&
|
|
|
|
!intel_dp->psr.psr2_sel_fetch_enabled)) {
|
|
|
|
tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
|
2022-04-05 15:53:44 +00:00
|
|
|
goto unlock;
|
2021-09-30 00:14:05 +00:00
|
|
|
}
|
|
|
|
|
2022-04-05 15:53:44 +00:00
|
|
|
if (pipe_frontbuffer_bits == 0)
|
|
|
|
goto unlock;
|
2014-11-19 15:37:47 +00:00
|
|
|
|
2022-04-05 15:53:44 +00:00
|
|
|
/* By definition flush = invalidate + flush */
|
|
|
|
_psr_flush_handle(intel_dp);
|
|
|
|
unlock:
|
2021-02-04 13:40:14 +00:00
|
|
|
mutex_unlock(&intel_dp->psr.lock);
|
|
|
|
}
|
2014-11-14 16:52:28 +00:00
|
|
|
}
|
|
|
|
|
2014-11-14 16:52:29 +00:00
|
|
|
/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @intel_dp: Intel DP
 *
 * This function is called after the initializing connector.
 * (the initializing of connector treats the handling of connector capabilities)
 * And it initializes basic PSR stuff for each DP Encoder.
 */
void intel_psr_init(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* Nothing to do when the HW has neither PSR nor DP 2.0 support. */
	if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have a instance of PSR registers per transcoder but
	 * BDW, GEN9 and GEN11 are not validated by HW team in other transcoder
	 * than eDP one.
	 * For now it only supports one instance of PSR for BDW, GEN9 and GEN11.
	 * So lets keep it hardcoded to PORT_A for BDW, GEN9 and GEN11.
	 * But GEN12 supports a instance of PSR registers per transcoder.
	 */
	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Port not supported\n");
		return;
	}

	/*
	 * DP 2.0 non-eDP ports advertise Panel Replay as the source-side
	 * feature; everything else gets classic PSR support.
	 */
	if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
		intel_dp->psr.source_panel_replay_support = true;
	else
		intel_dp->psr.source_support = true;

	/* Set link_standby x link_off defaults */
	if (DISPLAY_VER(dev_priv) < 12)
		/* For new platforms up to TGL let's respect VBT back again */
		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;

	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
	mutex_init(&intel_dp->psr.lock);
}
|
2018-06-26 20:16:41 +00:00
|
|
|
|
2019-11-28 01:48:49 +00:00
|
|
|
/*
 * Read the sink's PSR (or Panel Replay) status and error status over DPCD.
 *
 * The DPCD offsets differ between classic PSR and Panel Replay; the right
 * pair is selected from intel_dp->psr.panel_replay_enabled.  On success
 * *status is masked down to the sink-state field and 0 is returned;
 * otherwise the (non-1) drm_dp_dpcd_readb() result is propagated.
 */
static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
					   u8 *status, u8 *error_status)
{
	struct drm_dp_aux *aux = &intel_dp->aux;
	int ret;
	unsigned int offset;

	offset = intel_dp->psr.panel_replay_enabled ?
		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;

	ret = drm_dp_dpcd_readb(aux, offset, status);
	if (ret != 1)
		return ret;

	offset = intel_dp->psr.panel_replay_enabled ?
		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;

	ret = drm_dp_dpcd_readb(aux, offset, error_status);
	if (ret != 1)
		return ret;

	/* Callers only care about the sink-state field of the status byte. */
	*status = *status & DP_PSR_SINK_STATE_MASK;

	return 0;
}
|
|
|
|
|
2019-11-28 01:48:50 +00:00
|
|
|
/*
 * Check the sink's ALPM (Aggressive Link Power Management) status and
 * disable PSR if the sink reported a lock timeout error.  Only relevant
 * while PSR2 is enabled; the error bit is cleared by writing it back.
 */
static void psr_alpm_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_dp_aux *aux = &intel_dp->aux;
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	if (!psr->psr2_enabled)
		return;

	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
		return;
	}

	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
		/* Mark the sink unreliable so PSR isn't silently re-enabled. */
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "ALPM lock timeout error, disabling PSR\n");

		/* Clearing error */
		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
	}
}
|
|
|
|
|
2019-11-28 01:48:51 +00:00
|
|
|
/*
 * Check the sink's PSR event/status indicator (ESI) for a capability
 * change.  A sink that changes its PSR caps at runtime is treated as
 * unreliable: PSR is disabled and the indicator bit is cleared by
 * writing it back.
 */
static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
		return;
	}

	if (val & DP_PSR_CAPS_CHANGE) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "Sink PSR capability changed, disabling PSR\n");

		/* Clearing it */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
	}
}
|
|
|
|
|
2018-06-26 20:16:41 +00:00
|
|
|
/*
 * Handle a short HPD pulse from the sink while PSR may be active.
 *
 * Reads the sink's PSR status and error status over DPCD; on an internal
 * sink error or any of the known error bits (RFB storage, VSC SDP
 * uncorrectable, link CRC) PSR is disabled and the sink is flagged as
 * unreliable.  The error register is then cleared, and the ALPM and
 * capability-change indicators are checked as well.  All of this runs
 * under the PSR mutex.
 */
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 status, error_status;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled)
		goto exit;

	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
		drm_err(&dev_priv->drm,
			"Error reading PSR status or error status\n");
		goto exit;
	}

	/* Any fatal condition disables PSR until the next modeset. */
	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	/* Log which specific condition triggered the disable. */
	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink internal error, disabling PSR\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR RFB storage error, disabling PSR\n");
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR Link CRC error, disabling PSR\n");

	if (error_status & ~errors)
		drm_err(&dev_priv->drm,
			"PSR_ERROR_STATUS unhandled errors %x\n",
			error_status & ~errors);

	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);

	psr_alpm_check(intel_dp);
	psr_capability_changed_check(intel_dp);

exit:
	mutex_unlock(&psr->lock);
}
|
2018-11-21 22:54:37 +00:00
|
|
|
|
|
|
|
bool intel_psr_enabled(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
bool ret;
|
|
|
|
|
2021-02-09 18:14:38 +00:00
|
|
|
if (!CAN_PSR(intel_dp))
|
2018-11-21 22:54:37 +00:00
|
|
|
return false;
|
|
|
|
|
2021-02-04 13:40:14 +00:00
|
|
|
mutex_lock(&intel_dp->psr.lock);
|
|
|
|
ret = intel_dp->psr.enabled;
|
|
|
|
mutex_unlock(&intel_dp->psr.lock);
|
2018-11-21 22:54:37 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2022-04-05 15:53:43 +00:00
|
|
|
|
|
|
|
/**
 * intel_psr_lock - grab PSR lock
 * @crtc_state: the crtc state
 *
 * This is initially meant to be used by around CRTC update, when
 * vblank sensitive registers are updated and we need grab the lock
 * before it to avoid vblank evasion.
 */
void intel_psr_lock(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	/* Only one PSR-capable encoder can be attached; lock it and stop. */
	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		break;
	}
}
|
|
|
|
|
|
|
|
/**
 * intel_psr_unlock - release PSR lock
 * @crtc_state: the crtc state
 *
 * Release the PSR lock that was held during pipe update.
 */
void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	/* Mirror of intel_psr_lock(): unlock the single matching encoder. */
	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_unlock(&intel_dp->psr.lock);
		break;
	}
}
|
2023-03-17 13:41:42 +00:00
|
|
|
|
|
|
|
/*
 * Print the source-side PSR hardware state machine status to the given
 * seq_file.  The live-status field is decoded with a different name table
 * for PSR2 vs PSR1, since the two status registers encode different state
 * machines; unknown values print as "unknown".
 */
static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	const char *status = "unknown";
	u32 val, status_val;

	if (intel_dp->psr.psr2_enabled) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
}
|
|
|
|
|
|
|
|
/*
 * Dump the full PSR/Panel-Replay state for one DP encoder to a seq_file.
 *
 * Prints sink capabilities, the active mode (Panel Replay / PSR2 / PSR1 /
 * disabled), the source control register, live source status, busy
 * frontbuffer bits, the performance counter, optional IRQ timestamps and,
 * for PSR2, the selective-update block counts per frame.  Register reads
 * are done holding a runtime-PM wakeref and the PSR mutex.
 */
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	struct intel_psr *psr = &intel_dp->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	seq_printf(m, "Sink support: PSR = %s",
		   str_yes_no(psr->sink_support));

	if (psr->sink_support)
		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
	seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support));

	/* Nothing more to report when the sink supports neither feature. */
	if (!(psr->sink_support || psr->sink_panel_replay_support))
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&psr->lock);

	if (psr->panel_replay_enabled)
		status = "Panel Replay Enabled";
	else if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled) {
		seq_printf(m, "PSR sink not reliable: %s\n",
			   str_yes_no(psr->sink_not_reliable));

		goto unlock;
	}

	/* Pick the control register matching the active mode. */
	if (psr->panel_replay_enabled) {
		val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
	} else if (psr->psr2_enabled) {
		val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
		   str_enabled_disabled(enabled), val);
	psr_source_status(intel_dp, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
	seq_printf(m, "Performance counter: %u\n",
		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));

	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Reading all 3 registers before hand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
			val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
			su_frames_val[frame / 3] = val;
		}

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}

		seq_printf(m, "PSR2 selective fetch: %s\n",
			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * debugfs show callback for i915_edp_psr_status: report PSR status of the
 * first PSR-capable encoder, or -ENODEV when the HW or no encoder
 * supports PSR.
 */
static int i915_edp_psr_status_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct intel_dp *intel_dp = NULL;
	struct intel_encoder *encoder;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	/* Find the first EDP which supports PSR */
	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		intel_dp = enc_to_intel_dp(encoder);
		break;
	}

	if (!intel_dp)
		return -ENODEV;

	return intel_psr_status(m, intel_dp);
}
DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
|
|
|
|
|
|
|
|
/*
 * debugfs write callback for i915_edp_psr_debug: apply the debug value to
 * every PSR-capable encoder (under a runtime-PM wakeref).  Returns the
 * result of the last intel_psr_debug_set() call, or -ENODEV when PSR is
 * unsupported or no encoder matched.
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_encoder *encoder;
	intel_wakeref_t wakeref;
	int ret = -ENODEV;

	if (!HAS_PSR(dev_priv))
		return ret;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);

		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

		// TODO: split to each transcoder's PSR debug state
		ret = intel_psr_debug_set(intel_dp, val);

		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	}

	return ret;
}
|
|
|
|
|
|
|
|
/*
 * debugfs read callback for i915_edp_psr_debug: return the debug value of
 * the first PSR-capable encoder, or -ENODEV when none exists.
 */
static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_encoder *encoder;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		// TODO: split to each transcoder's PSR debug state
		*val = READ_ONCE(intel_dp->psr.debug);
		return 0;
	}

	return -ENODEV;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
|
|
|
|
|
|
|
|
/*
 * Register the device-level PSR debugfs entries:
 * i915_edp_psr_debug (rw) and i915_edp_psr_status (ro).
 */
void intel_psr_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
			    i915, &i915_edp_psr_debug_fops);

	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
			    i915, &i915_edp_psr_status_fops);
}
|
|
|
|
|
2023-11-08 07:23:03 +00:00
|
|
|
static const char *psr_mode_str(struct intel_dp *intel_dp)
|
|
|
|
{
|
|
|
|
if (intel_dp->psr.panel_replay_enabled)
|
|
|
|
return "PANEL-REPLAY";
|
|
|
|
else if (intel_dp->psr.enabled)
|
|
|
|
return "PSR";
|
|
|
|
|
|
|
|
return "unknown";
|
|
|
|
}
|
|
|
|
|
2023-03-17 13:41:42 +00:00
|
|
|
/*
 * debugfs show handler for the per-connector i915_psr_sink_status file.
 *
 * Reads the sink-side PSR/Panel-Replay status and error status from the
 * sink (via psr_get_status_and_error_status()), decodes them into
 * human-readable strings and prints them to the seq_file.
 *
 * Returns 0 on success, -ENODEV when the sink supports neither PSR nor
 * Panel Replay or the connector is not connected, or the error code from
 * the DPCD read.
 */
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = m->private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	/* Decode table for the PSR sink-state field of the status byte. */
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	/* Decode table for the Panel Replay frame-lock field of the status byte. */
	static const char * const panel_replay_status[] = {
		"Sink device frame is locked to the Source device",
		"Sink device is coasting, using the VTotal target",
		"Sink device is governing the frame rate (frame rate unlock is granted)",
		"Sink device in the process of re-locking with the Source device",
	};
	const char *str;
	int ret;
	u8 status, error_status;
	u32 idx;

	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
		return -ENODEV;
	}

	if (connector->base.status != connector_status_connected)
		return -ENODEV;

	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
	if (ret)
		return ret;

	/* Pick the decode table matching the active mode; out-of-range or
	 * mode-off values fall back to "unknown". */
	str = "unknown";
	if (intel_dp->psr.panel_replay_enabled) {
		idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
		if (idx < ARRAY_SIZE(panel_replay_status))
			str = panel_replay_status[idx];
	} else if (intel_dp->psr.enabled) {
		idx = status & DP_PSR_SINK_STATE_MASK;
		if (idx < ARRAY_SIZE(sink_status))
			str = sink_status[idx];
	}

	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);

	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);

	/* Terminate the error-status line with ':' only when per-error
	 * detail lines follow below. */
	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			    DP_PSR_LINK_CRC_ERROR))
		seq_puts(m, ":\n");
	else
		seq_puts(m, "\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));

	/* ret is 0 here; the DPCD-read failure path returned above. */
	return ret;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
|
|
|
|
|
|
|
|
static int i915_psr_status_show(struct seq_file *m, void *data)
|
|
|
|
{
|
2023-03-17 13:41:43 +00:00
|
|
|
struct intel_connector *connector = m->private;
|
|
|
|
struct intel_dp *intel_dp = intel_attached_dp(connector);
|
2023-03-17 13:41:42 +00:00
|
|
|
|
|
|
|
return intel_psr_status(m, intel_dp);
|
|
|
|
}
|
|
|
|
DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
|
|
|
|
|
2023-03-17 13:41:43 +00:00
|
|
|
void intel_psr_connector_debugfs_add(struct intel_connector *connector)
|
2023-03-17 13:41:42 +00:00
|
|
|
{
|
2023-03-17 13:41:43 +00:00
|
|
|
struct drm_i915_private *i915 = to_i915(connector->base.dev);
|
|
|
|
struct dentry *root = connector->base.debugfs_entry;
|
2023-03-17 13:41:42 +00:00
|
|
|
|
2024-01-03 15:26:09 +00:00
|
|
|
/* TODO: Add support for MST connectors as well. */
|
|
|
|
if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
|
|
|
|
connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
|
|
|
|
connector->mst_port)
|
|
|
|
return;
|
2023-03-17 13:41:42 +00:00
|
|
|
|
|
|
|
debugfs_create_file("i915_psr_sink_status", 0444, root,
|
|
|
|
connector, &i915_psr_sink_status_fops);
|
|
|
|
|
2023-11-08 07:23:03 +00:00
|
|
|
if (HAS_PSR(i915) || HAS_DP20(i915))
|
2023-03-17 13:41:42 +00:00
|
|
|
debugfs_create_file("i915_psr_status", 0444, root,
|
|
|
|
connector, &i915_psr_status_fops);
|
|
|
|
}
|