linux-stable/drivers/gpu/drm/i915/display/intel_display_power.c

/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/
#include <linux/string_helpers.h>
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_snps_phy.h"
#include "skl_watermark.h"
#include "vlv_sideband.h"
#define for_each_power_domain_well(__dev_priv, __power_well, __domain) \
for_each_power_well(__dev_priv, __power_well) \
for_each_if(test_bit((__domain), (__power_well)->domains.bits))
#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
for_each_power_well_reverse(__dev_priv, __power_well) \
for_each_if(test_bit((__domain), (__power_well)->domains.bits))
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
switch (domain) {
case POWER_DOMAIN_DISPLAY_CORE:
return "DISPLAY_CORE";
case POWER_DOMAIN_PIPE_A:
return "PIPE_A";
case POWER_DOMAIN_PIPE_B:
return "PIPE_B";
case POWER_DOMAIN_PIPE_C:
return "PIPE_C";
case POWER_DOMAIN_PIPE_D:
return "PIPE_D";
case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
return "PIPE_PANEL_FITTER_A";
case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
return "PIPE_PANEL_FITTER_B";
case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
return "PIPE_PANEL_FITTER_C";
case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
return "PIPE_PANEL_FITTER_D";
case POWER_DOMAIN_TRANSCODER_A:
return "TRANSCODER_A";
case POWER_DOMAIN_TRANSCODER_B:
return "TRANSCODER_B";
case POWER_DOMAIN_TRANSCODER_C:
return "TRANSCODER_C";
case POWER_DOMAIN_TRANSCODER_D:
return "TRANSCODER_D";
case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP";
case POWER_DOMAIN_TRANSCODER_DSI_A:
return "TRANSCODER_DSI_A";
case POWER_DOMAIN_TRANSCODER_DSI_C:
return "TRANSCODER_DSI_C";
case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
return "TRANSCODER_VDSC_PW2";
case POWER_DOMAIN_PORT_DDI_LANES_A:
return "PORT_DDI_LANES_A";
case POWER_DOMAIN_PORT_DDI_LANES_B:
return "PORT_DDI_LANES_B";
case POWER_DOMAIN_PORT_DDI_LANES_C:
return "PORT_DDI_LANES_C";
case POWER_DOMAIN_PORT_DDI_LANES_D:
return "PORT_DDI_LANES_D";
case POWER_DOMAIN_PORT_DDI_LANES_E:
return "PORT_DDI_LANES_E";
case POWER_DOMAIN_PORT_DDI_LANES_F:
return "PORT_DDI_LANES_F";
case POWER_DOMAIN_PORT_DDI_LANES_TC1:
return "PORT_DDI_LANES_TC1";
case POWER_DOMAIN_PORT_DDI_LANES_TC2:
return "PORT_DDI_LANES_TC2";
case POWER_DOMAIN_PORT_DDI_LANES_TC3:
return "PORT_DDI_LANES_TC3";
case POWER_DOMAIN_PORT_DDI_LANES_TC4:
return "PORT_DDI_LANES_TC4";
case POWER_DOMAIN_PORT_DDI_LANES_TC5:
return "PORT_DDI_LANES_TC5";
case POWER_DOMAIN_PORT_DDI_LANES_TC6:
return "PORT_DDI_LANES_TC6";
case POWER_DOMAIN_PORT_DDI_IO_A:
return "PORT_DDI_IO_A";
case POWER_DOMAIN_PORT_DDI_IO_B:
return "PORT_DDI_IO_B";
case POWER_DOMAIN_PORT_DDI_IO_C:
return "PORT_DDI_IO_C";
case POWER_DOMAIN_PORT_DDI_IO_D:
return "PORT_DDI_IO_D";
case POWER_DOMAIN_PORT_DDI_IO_E:
return "PORT_DDI_IO_E";
case POWER_DOMAIN_PORT_DDI_IO_F:
return "PORT_DDI_IO_F";
case POWER_DOMAIN_PORT_DDI_IO_TC1:
return "PORT_DDI_IO_TC1";
case POWER_DOMAIN_PORT_DDI_IO_TC2:
return "PORT_DDI_IO_TC2";
case POWER_DOMAIN_PORT_DDI_IO_TC3:
return "PORT_DDI_IO_TC3";
case POWER_DOMAIN_PORT_DDI_IO_TC4:
return "PORT_DDI_IO_TC4";
case POWER_DOMAIN_PORT_DDI_IO_TC5:
return "PORT_DDI_IO_TC5";
case POWER_DOMAIN_PORT_DDI_IO_TC6:
return "PORT_DDI_IO_TC6";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
return "PORT_CRT";
case POWER_DOMAIN_PORT_OTHER:
return "PORT_OTHER";
case POWER_DOMAIN_VGA:
return "VGA";
case POWER_DOMAIN_AUDIO_MMIO:
return "AUDIO_MMIO";
case POWER_DOMAIN_AUDIO_PLAYBACK:
return "AUDIO_PLAYBACK";
case POWER_DOMAIN_AUX_IO_A:
return "AUX_IO_A";
case POWER_DOMAIN_AUX_IO_B:
return "AUX_IO_B";
case POWER_DOMAIN_AUX_IO_C:
return "AUX_IO_C";
case POWER_DOMAIN_AUX_IO_D:
return "AUX_IO_D";
case POWER_DOMAIN_AUX_IO_E:
return "AUX_IO_E";
case POWER_DOMAIN_AUX_IO_F:
return "AUX_IO_F";
case POWER_DOMAIN_AUX_A:
return "AUX_A";
case POWER_DOMAIN_AUX_B:
return "AUX_B";
case POWER_DOMAIN_AUX_C:
return "AUX_C";
case POWER_DOMAIN_AUX_D:
return "AUX_D";
case POWER_DOMAIN_AUX_E:
return "AUX_E";
case POWER_DOMAIN_AUX_F:
return "AUX_F";
case POWER_DOMAIN_AUX_USBC1:
return "AUX_USBC1";
case POWER_DOMAIN_AUX_USBC2:
return "AUX_USBC2";
case POWER_DOMAIN_AUX_USBC3:
return "AUX_USBC3";
case POWER_DOMAIN_AUX_USBC4:
return "AUX_USBC4";
case POWER_DOMAIN_AUX_USBC5:
return "AUX_USBC5";
case POWER_DOMAIN_AUX_USBC6:
return "AUX_USBC6";
case POWER_DOMAIN_AUX_TBT1:
return "AUX_TBT1";
case POWER_DOMAIN_AUX_TBT2:
return "AUX_TBT2";
case POWER_DOMAIN_AUX_TBT3:
return "AUX_TBT3";
case POWER_DOMAIN_AUX_TBT4:
return "AUX_TBT4";
case POWER_DOMAIN_AUX_TBT5:
return "AUX_TBT5";
case POWER_DOMAIN_AUX_TBT6:
return "AUX_TBT6";
case POWER_DOMAIN_GMBUS:
return "GMBUS";
case POWER_DOMAIN_INIT:
return "INIT";
case POWER_DOMAIN_MODESET:
return "MODESET";
case POWER_DOMAIN_GT_IRQ:
return "GT_IRQ";
case POWER_DOMAIN_DC_OFF:
return "DC_OFF";
case POWER_DOMAIN_TC_COLD_OFF:
return "TC_COLD_OFF";
default:
MISSING_CASE(domain);
return "?";
}
}
/**
* __intel_display_power_is_enabled - unlocked check for a power domain
* @dev_priv: i915 device instance
* @domain: power domain to check
*
* This is the unlocked version of intel_display_power_is_enabled() and should
* only be used from error capture and recovery code where deadlocks are
* possible.
*
* Returns:
* True when the power domain is enabled, false otherwise.
*/
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_well *power_well;
bool is_enabled;
if (dev_priv->runtime_pm.suspended)
return false;
is_enabled = true;
for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
if (intel_power_well_is_always_on(power_well))
continue;
if (!intel_power_well_is_enabled_cached(power_well)) {
is_enabled = false;
break;
}
}
return is_enabled;
}
/**
* intel_display_power_is_enabled - check for a power domain
* @dev_priv: i915 device instance
* @domain: power domain to check
*
* This function can be used to check the hw power domain state. It is mostly
* used in hardware state readout functions. Everywhere else code should rely
* upon explicit power domain reference counting to ensure that the hardware
* block is powered up before accessing it.
*
* Callers must hold the relevant modesetting locks to ensure that concurrent
* threads can't disable the power well while the caller tries to read a few
* registers.
*
* Returns:
* True when the power domain is enabled, false otherwise.
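*
* A minimal sketch of a hardware state readout style check
* (POWER_DOMAIN_PIPE_A is only an illustrative choice):
*
*   if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
*       ... read out the pipe A hardware state ...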
*/
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
bool ret;
power_domains = &dev_priv->display.power.domains;
mutex_lock(&power_domains->lock);
ret = __intel_display_power_is_enabled(dev_priv, domain);
mutex_unlock(&power_domains->lock);
return ret;
}
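/*
* Walk the DC6 -> DC5 -> DC3CO -> DC_STATE_DISABLE fallback order starting
* at the requested state and return the first state allowed by
* allowed_dc_mask, with DC_STATE_DISABLE as the unconditional final fallback.
*/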
static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
u32 target_dc_state)
{
static const u32 states[] = {
DC_STATE_EN_UPTO_DC6,
DC_STATE_EN_UPTO_DC5,
DC_STATE_EN_DC3CO,
DC_STATE_DISABLE,
};
int i;
for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
if (target_dc_state != states[i])
continue;
if (dev_priv->display.dmc.allowed_dc_mask & target_dc_state)
break;
target_dc_state = states[i + 1];
}
return target_dc_state;
}
/**
* intel_display_power_set_target_dc_state - Set target dc state.
* @dev_priv: i915 device
* @state: state which needs to be set as target_dc_state.
*
* This function sets the "DC off" power well's target_dc_state.
* Based upon this target_dc_state, the "DC off" power well will
* enable the desired DC state.
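*
* A minimal usage sketch (DC3CO as an illustrative target state):
*
*   intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);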
*/
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
u32 state)
{
struct i915_power_well *power_well;
bool dc_off_enabled;
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
mutex_lock(&power_domains->lock);
power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
if (drm_WARN_ON(&dev_priv->drm, !power_well))
goto unlock;
state = sanitize_target_dc_state(dev_priv, state);
if (state == dev_priv->display.dmc.target_dc_state)
goto unlock;
dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
/*
* If the DC off power well is disabled, we need to enable and then
* disable it so that the target DC state takes effect.
*/
if (!dc_off_enabled)
intel_power_well_enable(dev_priv, power_well);
dev_priv->display.dmc.target_dc_state = state;
if (!dc_off_enabled)
intel_power_well_disable(dev_priv, power_well);
unlock:
mutex_unlock(&power_domains->lock);
}
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
static void __async_put_domains_mask(struct i915_power_domains *power_domains,
struct intel_power_domain_mask *mask)
{
bitmap_or(mask->bits,
power_domains->async_put_domains[0].bits,
power_domains->async_put_domains[1].bits,
POWER_DOMAIN_NUM);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
display.power.domains);
return !drm_WARN_ON(&i915->drm,
bitmap_intersects(power_domains->async_put_domains[0].bits,
power_domains->async_put_domains[1].bits,
POWER_DOMAIN_NUM));
}
static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
display.power.domains);
struct intel_power_domain_mask async_put_mask;
enum intel_display_power_domain domain;
bool err = false;
err |= !assert_async_put_domain_masks_disjoint(power_domains);
__async_put_domains_mask(power_domains, &async_put_mask);
err |= drm_WARN_ON(&i915->drm,
!!power_domains->async_put_wakeref !=
!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));
for_each_power_domain(domain, &async_put_mask)
err |= drm_WARN_ON(&i915->drm,
power_domains->domain_use_count[domain] != 1);
return !err;
}
static void print_power_domains(struct i915_power_domains *power_domains,
const char *prefix, struct intel_power_domain_mask *mask)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
display.power.domains);
enum intel_display_power_domain domain;
drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
for_each_power_domain(domain, mask)
drm_dbg(&i915->drm, "%s use_count %d\n",
intel_display_power_domain_str(domain),
power_domains->domain_use_count[domain]);
}
static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
display.power.domains);
drm_dbg(&i915->drm, "async_put_wakeref %u\n",
power_domains->async_put_wakeref);
print_power_domains(power_domains, "async_put_domains[0]",
&power_domains->async_put_domains[0]);
print_power_domains(power_domains, "async_put_domains[1]",
&power_domains->async_put_domains[1]);
}
static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
if (!__async_put_domains_state_ok(power_domains))
print_async_put_domains_state(power_domains);
}
#else
static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}
static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}
#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
static void async_put_domains_mask(struct i915_power_domains *power_domains,
struct intel_power_domain_mask *mask)
{
assert_async_put_domain_masks_disjoint(power_domains);
__async_put_domains_mask(power_domains, mask);
}
static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
enum intel_display_power_domain domain)
{
assert_async_put_domain_masks_disjoint(power_domains);
clear_bit(domain, power_domains->async_put_domains[0].bits);
clear_bit(domain, power_domains->async_put_domains[1].bits);
}
static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct intel_power_domain_mask async_put_mask;
bool ret = false;
async_put_domains_mask(power_domains, &async_put_mask);
if (!test_bit(domain, async_put_mask.bits))
goto out_verify;
async_put_domains_clear_domain(power_domains, domain);
ret = true;
async_put_domains_mask(power_domains, &async_put_mask);
if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
goto out_verify;
cancel_delayed_work(&power_domains->async_put_work);
intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
verify_async_put_domains_state(power_domains);
return ret;
}
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *power_well;
if (intel_display_power_grab_async_put_ref(dev_priv, domain))
return;
for_each_power_domain_well(dev_priv, power_well, domain)
intel_power_well_get(dev_priv, power_well);
power_domains->domain_use_count[domain]++;
}
/**
* intel_display_power_get - grab a power domain reference
* @dev_priv: i915 device instance
* @domain: power domain to reference
*
* This function grabs a power domain reference for @domain and ensures that the
* power domain and all its parents are powered up. Therefore users should only
* grab a reference to the innermost power domain they need.
*
* Any power domain reference obtained by this function must have a symmetric
* call to intel_display_power_put() to release the reference again.
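*
* A minimal sketch of the expected get/use/put pattern (POWER_DOMAIN_AUX_A
* is only an illustrative choice):
*
*   wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_A);
*   ... access the hardware block backed by the domain ...
*   intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A, wakeref);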
*/
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&power_domains->lock);
__intel_display_power_get_domain(dev_priv, domain);
mutex_unlock(&power_domains->lock);
return wakeref;
}
/**
* intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
* @dev_priv: i915 device instance
* @domain: power domain to reference
*
* This function grabs a power domain reference for @domain only if the power
* domain is currently enabled, and it ensures that the domain and all its
* parents stay powered up while the reference is held. Therefore users should
* only grab a reference to the innermost power domain they need.
*
* Any power domain reference obtained by this function must have a symmetric
* call to intel_display_power_put() to release the reference again.
*
* Returns:
* A wakeref cookie when the reference was grabbed, 0 when the power domain
* was not enabled (in which case no reference is held).
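*
* A minimal sketch of the conditional readout pattern this enables
* (POWER_DOMAIN_AUX_A is only an illustrative choice):
*
*   wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUX_A);
*   if (!wakeref)
*       return;
*   ... read out the hardware state ...
*   intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A, wakeref);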
*/
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
intel_wakeref_t wakeref;
bool is_enabled;
wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
if (!wakeref)
return 0;
mutex_lock(&power_domains->lock);
if (__intel_display_power_is_enabled(dev_priv, domain)) {
__intel_display_power_get_domain(dev_priv, domain);
is_enabled = true;
} else {
is_enabled = false;
}
mutex_unlock(&power_domains->lock);
if (!is_enabled) {
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
wakeref = 0;
}
return wakeref;
}
static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
const char *name = intel_display_power_domain_str(domain);
struct intel_power_domain_mask async_put_mask;
power_domains = &dev_priv->display.power.domains;
drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
"Use count on domain %s is already zero\n",
name);
async_put_domains_mask(power_domains, &async_put_mask);
drm_WARN(&dev_priv->drm,
test_bit(domain, async_put_mask.bits),
"Async disabling of domain %s is pending\n",
name);
power_domains->domain_use_count[domain]--;
for_each_power_domain_well_reverse(dev_priv, power_well, domain)
intel_power_well_put(dev_priv, power_well);
}
static void __intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
mutex_lock(&power_domains->lock);
__intel_display_power_put_domain(dev_priv, domain);
mutex_unlock(&power_domains->lock);
}
static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
intel_wakeref_t wakeref)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
display.power.domains);
drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
power_domains->async_put_wakeref = wakeref;
drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
&power_domains->async_put_work,
msecs_to_jiffies(100)));
}
static void
release_async_put_domains(struct i915_power_domains *power_domains,
struct intel_power_domain_mask *mask)
{
struct drm_i915_private *dev_priv =
container_of(power_domains, struct drm_i915_private,
display.power.domains);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
enum intel_display_power_domain domain;
intel_wakeref_t wakeref;
/*
* The caller must already hold a raw wakeref; upgrade that to a proper
* wakeref to make the state checker happy about the HW access during
* power well disabling.
*/
assert_rpm_raw_wakeref_held(rpm);
wakeref = intel_runtime_pm_get(rpm);
for_each_power_domain(domain, mask) {
/* Clear before put, so put's sanity check is happy. */
async_put_domains_clear_domain(power_domains, domain);
__intel_display_power_put_domain(dev_priv, domain);
}
intel_runtime_pm_put(rpm, wakeref);
}
static void
intel_display_power_put_async_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
display.power.domains.async_put_work.work);
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
intel_wakeref_t old_work_wakeref = 0;
mutex_lock(&power_domains->lock);
/*
* Bail out if all the domain refs pending to be released were grabbed
* by subsequent gets or a flush_work.
*/
old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
if (!old_work_wakeref)
goto out_verify;
release_async_put_domains(power_domains,
&power_domains->async_put_domains[0]);
/* Requeue the work if more domains were async put meanwhile. */
if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
bitmap_copy(power_domains->async_put_domains[0].bits,
power_domains->async_put_domains[1].bits,
POWER_DOMAIN_NUM);
bitmap_zero(power_domains->async_put_domains[1].bits,
POWER_DOMAIN_NUM);
queue_async_put_domains_work(power_domains,
fetch_and_zero(&new_work_wakeref));
} else {
/*
* Cancel the work that got queued after this one got dequeued,
* since here we released the corresponding async-put reference.
*/
cancel_delayed_work(&power_domains->async_put_work);
}
out_verify:
verify_async_put_domains_state(power_domains);
mutex_unlock(&power_domains->lock);
if (old_work_wakeref)
intel_runtime_pm_put_raw(rpm, old_work_wakeref);
if (new_work_wakeref)
intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}
/**
* intel_display_power_put_async - release a power domain reference asynchronously
* @i915: i915 device instance
* @domain: power domain to drop the reference for
* @wakeref: wakeref acquired for the reference that is being released
*
* This function drops the power domain reference obtained by
* intel_display_power_get*() and schedules a work item to power down the
* corresponding hardware block if this is the last reference.
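*
* A minimal sketch of the asynchronous put pattern (POWER_DOMAIN_AUX_A is
* only an illustrative choice):
*
*   wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_A);
*   ... access the hardware block ...
*   intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);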
*/
void __intel_display_power_put_async(struct drm_i915_private *i915,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
struct i915_power_domains *power_domains = &i915->display.power.domains;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
mutex_lock(&power_domains->lock);
if (power_domains->domain_use_count[domain] > 1) {
__intel_display_power_put_domain(i915, domain);
goto out_verify;
}
drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
/* Let a pending work requeue itself or queue a new one. */
if (power_domains->async_put_wakeref) {
set_bit(domain, power_domains->async_put_domains[1].bits);
} else {
set_bit(domain, power_domains->async_put_domains[0].bits);
queue_async_put_domains_work(power_domains,
fetch_and_zero(&work_wakeref));
}
out_verify:
verify_async_put_domains_state(power_domains);
mutex_unlock(&power_domains->lock);
if (work_wakeref)
intel_runtime_pm_put_raw(rpm, work_wakeref);
intel_runtime_pm_put(rpm, wakeref);
}
/**
* intel_display_power_flush_work - flushes the async display power disabling work
* @i915: i915 device instance
*
* Flushes any pending work that was scheduled by a preceding
* intel_display_power_put_async() call, completing the disabling of the
* corresponding power domains.
*
* Note that the work handler function may still be running after this
* function returns; to ensure that the work handler isn't running use
* intel_display_power_flush_work_sync() instead.
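*
* A minimal sketch of forcing a pending asynchronous put to complete
* (the domain and wakeref are those of a preceding put_async call):
*
*   intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
*   ...
*   intel_display_power_flush_work(i915);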
*/
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &i915->display.power.domains;
struct intel_power_domain_mask async_put_mask;
intel_wakeref_t work_wakeref;
mutex_lock(&power_domains->lock);
work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
if (!work_wakeref)
goto out_verify;
async_put_domains_mask(power_domains, &async_put_mask);
release_async_put_domains(power_domains, &async_put_mask);
cancel_delayed_work(&power_domains->async_put_work);
out_verify:
verify_async_put_domains_state(power_domains);
mutex_unlock(&power_domains->lock);
if (work_wakeref)
intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}
/**
* intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
* @i915: i915 device instance
*
* Like intel_display_power_flush_work(), but also ensure that the work
* handler function is not running any more when this function returns.
*/
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &i915->display.power.domains;
intel_display_power_flush_work(i915);
cancel_delayed_work_sync(&power_domains->async_put_work);
verify_async_put_domains_state(power_domains);
drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
* intel_display_power_put - release a power domain reference
* @dev_priv: i915 device instance
* @domain: power domain to put the reference for
* @wakeref: wakeref acquired for the reference that is being released
*
* This function drops the power domain reference obtained by
* intel_display_power_get() and might power down the corresponding hardware
* block right away if this is the last reference.
*/
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
__intel_display_power_put(dev_priv, domain);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#else
/**
* intel_display_power_put_unchecked - release an unchecked power domain reference
* @dev_priv: i915 device instance
* @domain: power domain to put the reference for
*
* This function drops the power domain reference obtained by
* intel_display_power_get() and might power down the corresponding hardware
* block right away if this is the last reference.
*
* This function is only for the power domain code's internal use to suppress wakeref
* tracking when the corresponding debug kconfig option is disabled; it should not
* be used otherwise.
*/
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
__intel_display_power_put(dev_priv, domain);
intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
#endif
void
intel_display_power_get_in_set(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain)
{
intel_wakeref_t __maybe_unused wf;
drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
wf = intel_display_power_get(i915, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
power_domain_set->wakerefs[domain] = wf;
#endif
set_bit(domain, power_domain_set->mask.bits);
}
bool
intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain)
{
intel_wakeref_t wf;
drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
wf = intel_display_power_get_if_enabled(i915, domain);
if (!wf)
return false;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
power_domain_set->wakerefs[domain] = wf;
#endif
set_bit(domain, power_domain_set->mask.bits);
return true;
}
void
intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set,
struct intel_power_domain_mask *mask)
{
enum intel_display_power_domain domain;
drm_WARN_ON(&i915->drm,
!bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));
for_each_power_domain(domain, mask) {
intel_wakeref_t __maybe_unused wf = -1;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
#endif
intel_display_power_put(i915, domain, wf);
clear_bit(domain, power_domain_set->mask.bits);
}
}
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
int disable_power_well)
{
if (disable_power_well >= 0)
return !!disable_power_well;
return 1;
}
static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int enable_dc)
{
u32 mask;
int requested_dc;
int max_dc;
if (!HAS_DISPLAY(dev_priv))
return 0;
if (IS_DG2(dev_priv))
max_dc = 1;
else if (IS_DG1(dev_priv))
max_dc = 3;
else if (DISPLAY_VER(dev_priv) >= 12)
max_dc = 4;
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
max_dc = 1;
else if (DISPLAY_VER(dev_priv) >= 9)
max_dc = 2;
else
max_dc = 0;
/*
* DC9 has a separate HW flow from the rest of the DC states,
* not depending on the DMC firmware. It's needed by system
* suspend/resume, so allow it unconditionally.
*/
mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
DISPLAY_VER(dev_priv) >= 11 ?
DC_STATE_EN_DC9 : 0;
if (!dev_priv->params.disable_power_well)
max_dc = 0;
if (enable_dc >= 0 && enable_dc <= max_dc) {
requested_dc = enable_dc;
} else if (enable_dc == -1) {
requested_dc = max_dc;
} else if (enable_dc > max_dc && enable_dc <= 4) {
drm_dbg_kms(&dev_priv->drm,
"Adjusting requested max DC state (%d->%d)\n",
enable_dc, max_dc);
requested_dc = max_dc;
} else {
drm_err(&dev_priv->drm,
"Unexpected value for enable_dc (%d)\n", enable_dc);
requested_dc = max_dc;
}
switch (requested_dc) {
case 4:
mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
break;
case 3:
mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
break;
case 2:
mask |= DC_STATE_EN_UPTO_DC6;
break;
case 1:
mask |= DC_STATE_EN_UPTO_DC5;
break;
}
drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
return mask;
}
/**
* intel_power_domains_init - initializes the power domain structures
* @dev_priv: i915 device instance
*
* Initializes the power domain structures for @dev_priv depending upon the
* supported platform.
*/
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
dev_priv->params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
dev_priv->params.disable_power_well);
dev_priv->display.dmc.allowed_dc_mask =
get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
dev_priv->display.dmc.target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
mutex_init(&power_domains->lock);
INIT_DELAYED_WORK(&power_domains->async_put_work,
intel_display_power_put_async_work);
return intel_display_power_map_init(power_domains);
}
/**
* intel_power_domains_cleanup - clean up power domains resources
* @dev_priv: i915 device instance
*
* Release any resources acquired by intel_power_domains_init()
*/
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
intel_display_power_map_cleanup(&dev_priv->display.power.domains);
}
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *power_well;
mutex_lock(&power_domains->lock);
for_each_power_well(dev_priv, power_well)
intel_power_well_sync_hw(dev_priv, power_well);
mutex_unlock(&power_domains->lock);
}
static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
enum dbuf_slice slice, bool enable)
{
i915_reg_t reg = DBUF_CTL_S(slice);
bool state;
intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
enable ? DBUF_POWER_REQUEST : 0);
intel_de_posting_read(dev_priv, reg);
udelay(10);
state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
drm_WARN(&dev_priv->drm, enable != state,
"DBuf slice %d power %s timeout!\n",
slice, str_enable_disable(enable));
}
void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask;
enum dbuf_slice slice;
drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
"Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
req_slices, slice_mask);
drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
req_slices);
/*
* This might run in parallel to gen9_dc_off_power_well_enable being
* called from intel_dp_detect, for instance, which could trigger an
* assertion failure due to a race: gen9_assert_dbuf_enabled might
* preempt this point after the registers were already updated, while
* dev_priv was not.
*/
mutex_lock(&power_domains->lock);
for_each_dbuf_slice(dev_priv, slice)
gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
dev_priv->display.dbuf.enabled_slices = req_slices;
mutex_unlock(&power_domains->lock);
}
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
dev_priv->display.dbuf.enabled_slices =
intel_enabled_dbuf_slices_mask(dev_priv);
/*
* Just power up at least one slice; we will
* figure out later which slices we have and what we need.
*/
gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
dev_priv->display.dbuf.enabled_slices);
}
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
gen9_dbuf_slices_update(dev_priv, 0);
}
static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
{
enum dbuf_slice slice;
if (IS_ALDERLAKE_P(dev_priv))
return;
for_each_dbuf_slice(dev_priv, slice)
intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
DBUF_TRACKER_STATE_SERVICE_MASK,
DBUF_TRACKER_STATE_SERVICE(8));
}
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask;
u32 mask, val, i;
if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
return;
mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
MBUS_ABOX_BT_CREDIT_POOL2_MASK |
MBUS_ABOX_B_CREDIT_MASK |
MBUS_ABOX_BW_CREDIT_MASK;
val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
MBUS_ABOX_BT_CREDIT_POOL2(16) |
MBUS_ABOX_B_CREDIT(1) |
MBUS_ABOX_BW_CREDIT(1);
/*
* gen12 platforms that use abox1 and abox2 for pixel data reads still
* expect us to program the abox_ctl0 register as well, even though
* we don't have to program other instance-0 registers like BW_BUDDY.
*/
if (DISPLAY_VER(dev_priv) == 12)
abox_regs |= BIT(0);
for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
}
static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
u32 val = intel_de_read(dev_priv, LCPLL_CTL);
/*
* The LCPLL register should be turned on by the BIOS. For now
* let's just check its state and print errors in case
* something is wrong. Don't even try to turn it on.
*/
if (val & LCPLL_CD_SOURCE_FCLK)
drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
if (val & LCPLL_PLL_DISABLE)
drm_err(&dev_priv->drm, "LCPLL is disabled\n");
if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
for_each_intel_crtc(&dev_priv->drm, crtc)
I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
"Display power well on\n");
I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
"SPLL enabled\n");
I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
"WRPLL1 enabled\n");
I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
"WRPLL2 enabled\n");
I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
"Panel power on\n");
I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
if (IS_HASWELL(dev_priv))
I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
"CPU PWM2 enabled\n");
I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
"PCH PWM1 enabled\n");
I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Utility pin enabled\n");
I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
"PCH GTC enabled\n");
/*
* In theory we can still leave IRQs enabled, as long as only the HPD
* interrupts remain enabled. We used to check for that, but since it's
* gen-specific and since we only disable LCPLL after we fully disable
* the interrupts, the check below should be enough.
*/
I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
if (IS_HASWELL(dev_priv))
return intel_de_read(dev_priv, D_COMP_HSW);
else
return intel_de_read(dev_priv, D_COMP_BDW);
}
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
if (IS_HASWELL(dev_priv)) {
if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
drm_dbg_kms(&dev_priv->drm,
"Failed to write to D_COMP\n");
} else {
intel_de_write(dev_priv, D_COMP_BDW, val);
intel_de_posting_read(dev_priv, D_COMP_BDW);
}
}
/*
* This function implements pieces of two sequences from BSpec:
* - Sequence for display software to disable LCPLL
* - Sequence for display software to allow package C8+
* The steps implemented here are just the steps that actually touch the LCPLL
* register. Callers should take care of disabling all the display engine
* functions, doing the mode unset, fixing interrupts, etc.
*/
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
bool switch_to_fclk, bool allow_power_down)
{
u32 val;
assert_can_disable_lcpll(dev_priv);
val = intel_de_read(dev_priv, LCPLL_CTL);
if (switch_to_fclk) {
val |= LCPLL_CD_SOURCE_FCLK;
intel_de_write(dev_priv, LCPLL_CTL, val);
if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE, 1))
drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
val = intel_de_read(dev_priv, LCPLL_CTL);
}
val |= LCPLL_PLL_DISABLE;
intel_de_write(dev_priv, LCPLL_CTL, val);
intel_de_posting_read(dev_priv, LCPLL_CTL);
if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
drm_err(&dev_priv->drm, "LCPLL still locked\n");
val = hsw_read_dcomp(dev_priv);
val |= D_COMP_COMP_DISABLE;
hsw_write_dcomp(dev_priv, val);
ndelay(100);
if (wait_for((hsw_read_dcomp(dev_priv) &
D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
if (allow_power_down) {
val = intel_de_read(dev_priv, LCPLL_CTL);
val |= LCPLL_POWER_DOWN_ALLOW;
intel_de_write(dev_priv, LCPLL_CTL, val);
intel_de_posting_read(dev_priv, LCPLL_CTL);
}
}
/*
* Fully restores LCPLL, disallowing power down and switching back to LCPLL
* source.
*/
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
u32 val;
val = intel_de_read(dev_priv, LCPLL_CTL);
if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
return;
/*
* Make sure we're not in PC8 state before disabling PC8, otherwise
* we'll hang the machine. To prevent PC8 state, just take a forcewake reference.
*/
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
intel_de_write(dev_priv, LCPLL_CTL, val);
intel_de_posting_read(dev_priv, LCPLL_CTL);
}
val = hsw_read_dcomp(dev_priv);
val |= D_COMP_COMP_FORCE;
val &= ~D_COMP_COMP_DISABLE;
hsw_write_dcomp(dev_priv, val);
val = intel_de_read(dev_priv, LCPLL_CTL);
val &= ~LCPLL_PLL_DISABLE;
intel_de_write(dev_priv, LCPLL_CTL, val);
if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
val = intel_de_read(dev_priv, LCPLL_CTL);
val &= ~LCPLL_CD_SOURCE_FCLK;
intel_de_write(dev_priv, LCPLL_CTL, val);
if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
drm_err(&dev_priv->drm,
"Switching back to LCPLL failed\n");
}
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_update_cdclk(dev_priv);
intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
}
/*
* Package states C8 and deeper are really deep PC states that can only be
* reached when all the devices on the system allow it, so even if the graphics
* device allows PC8+, it doesn't mean the system will actually get to these
* states. Our driver only allows PC8+ when going into runtime PM.
*
* The requirements for PC8+ are that all the outputs are disabled, the power
* well is disabled and most interrupts are disabled, and these are also
* requirements for runtime PM. When these conditions are met, we manually do
* the remaining steps: disable the interrupts and clocks, and switch the LCPLL
* refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
* hang the machine.
*
* When we really reach PC8 or deeper states (not just when we allow it) we lose
* the state of some registers, so when we come back from PC8+ we need to
* restore this state. We don't get into PC8+ if we're not in RC6, so we don't
* need to take care of the registers kept by RC6. Notice that this happens even
* if we don't put the device in PCI D3 state (which is what currently happens
* because of the runtime PM support).
*
* For more, read "Display Sequences for Package C8" in the hardware
* documentation.
*/
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
u32 val;
drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
if (HAS_PCH_LPT_LP(dev_priv)) {
val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
}
lpt_disable_clkout_dp(dev_priv);
hsw_disable_lcpll(dev_priv, true, true);
}
static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
u32 val;
drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
hsw_restore_lcpll(dev_priv);
intel_init_pch_refclk(dev_priv);
if (HAS_PCH_LPT_LP(dev_priv)) {
val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
val |= PCH_LP_PARTITION_LEVEL_DISABLE;
intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
}
}
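/*
 * Illustrative sketch (not actual driver code): per the PC8 comment block
 * above, these helpers are only used on the runtime PM paths, paired
 * roughly as below; the callback names are hypothetical, and outputs,
 * power wells and interrupts are assumed to be off already.
 *
 *	static int example_runtime_suspend(struct drm_i915_private *i915)
 *	{
 *		hsw_enable_pc8(i915);
 *		return 0;
 *	}
 *
 *	static int example_runtime_resume(struct drm_i915_private *i915)
 *	{
 *		hsw_disable_pc8(i915);
 *		return 0;
 *	}
 */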
static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
bool enable)
{
i915_reg_t reg;
u32 reset_bits, val;
if (IS_IVYBRIDGE(dev_priv)) {
reg = GEN7_MSG_CTL;
reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
} else {
reg = HSW_NDE_RSTWRN_OPT;
reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
}
if (DISPLAY_VER(dev_priv) >= 14)
reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;
val = intel_de_read(dev_priv, reg);
if (enable)
val |= reset_bits;
else
val &= ~reset_bits;
intel_de_write(dev_priv, reg, val);
}
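/*
 * A minimal equivalent sketch using intel_de_rmw(), which this file already
 * uses elsewhere with an (i915, reg, clear, set) signature; shown only to
 * make the read-modify-write above explicit:
 *
 *	intel_de_rmw(dev_priv, reg,
 *		     enable ? 0 : reset_bits,
 *		     enable ? reset_bits : 0);
 */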
static void skl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/* enable PCH reset handshake */
intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
if (!HAS_DISPLAY(dev_priv))
return;
/* enable PG1 and Misc I/O */
mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_enable(dev_priv, well);
well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
intel_power_well_enable(dev_priv, well);
mutex_unlock(&power_domains->lock);
intel_cdclk_init_hw(dev_priv);
gen9_dbuf_enable(dev_priv);
if (resume)
intel_dmc_load_program(dev_priv);
}
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
if (!HAS_DISPLAY(dev_priv))
return;
gen9_disable_dc_states(dev_priv);
/* TODO: disable DMC program */
gen9_dbuf_disable(dev_priv);
intel_cdclk_uninit_hw(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
/* disable PG1 and Misc I/O */
mutex_lock(&power_domains->lock);
/*
* BSpec says to keep the MISC IO power well enabled here, only
* remove our request for power well 1.
* Note that even though the driver's request is removed, power well 1
* may stay enabled after this due to DMC's own request on it.
*/
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock);
usleep_range(10, 30); /* 10 us delay per Bspec */
}
static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/*
* NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
* or else the reset will hang because there is no PCH to respond.
* Move the handshake programming to the initialization sequence;
* previously it was left up to the BIOS.
*/
intel_pch_reset_handshake(dev_priv, false);
if (!HAS_DISPLAY(dev_priv))
return;
/* Enable PG1 */
mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_enable(dev_priv, well);
mutex_unlock(&power_domains->lock);
intel_cdclk_init_hw(dev_priv);
gen9_dbuf_enable(dev_priv);
if (resume)
intel_dmc_load_program(dev_priv);
}
static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
if (!HAS_DISPLAY(dev_priv))
return;
gen9_disable_dc_states(dev_priv);
/* TODO: disable DMC program */
gen9_dbuf_disable(dev_priv);
intel_cdclk_uninit_hw(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
/*
* Disable PW1 (PG1).
* Note that even though the driver's request is removed, power well 1
* may stay enabled after this due to DMC's own request on it.
*/
mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock);
usleep_range(10, 30); /* 10 us delay per Bspec */
}
struct buddy_page_mask {
u32 page_mask;
u8 type;
u8 num_channels;
};
static const struct buddy_page_mask tgl_buddy_page_masks[] = {
{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
{ .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF },
{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
{ .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E },
{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
{}
};
static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
{ .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 },
{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
{ .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 },
{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
{}
};
static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
{
enum intel_dram_type type = dev_priv->dram_info.type;
u8 num_channels = dev_priv->dram_info.num_channels;
const struct buddy_page_mask *table;
unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask;
int config, i;
/* BW_BUDDY registers are not used on dGPUs beyond DG1 */
if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
return;
if (IS_ALDERLAKE_S(dev_priv) ||
IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
/* Wa_1409767108:tgl,dg1,adl-s */
table = wa_1409767108_buddy_page_masks;
else
table = tgl_buddy_page_masks;
for (config = 0; table[config].page_mask != 0; config++)
if (table[config].num_channels == num_channels &&
table[config].type == type)
break;
if (table[config].page_mask == 0) {
drm_dbg(&dev_priv->drm,
"Unknown memory configuration; disabling address buddy logic.\n");
for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
intel_de_write(dev_priv, BW_BUDDY_CTL(i),
BW_BUDDY_DISABLE);
} else {
for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
table[config].page_mask);
/* Wa_22010178259:tgl,dg1,rkl,adl-s */
if (DISPLAY_VER(dev_priv) == 12)
intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
BW_BUDDY_TLB_REQ_TIMER_MASK,
BW_BUDDY_TLB_REQ_TIMER(0x8));
}
}
}
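/*
 * Worked example (a sketch, assuming the Wa_1409767108 path is not taken):
 * with dram_info reporting two channels of INTEL_DRAM_DDR4, the
 * sentinel-terminated search above stops at the
 * { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F } entry,
 * so 0x1F is programmed into BW_BUDDY_PAGE_MASK for every abox instance set
 * in abox_mask.
 */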
static void icl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
/* 1. Enable PCH reset handshake. */
intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
if (!HAS_DISPLAY(dev_priv))
return;
/* 2. Initialize all combo phys */
intel_combo_phy_init(dev_priv);
/*
* 3. Enable Power Well 1 (PG1).
* The AUX IO power wells will be enabled on demand.
*/
mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_enable(dev_priv, well);
mutex_unlock(&power_domains->lock);
/* 4. Enable CDCLK. */
intel_cdclk_init_hw(dev_priv);
if (DISPLAY_VER(dev_priv) >= 12)
gen12_dbuf_slices_config(dev_priv);
/* 5. Enable DBUF. */
gen9_dbuf_enable(dev_priv);
/* 6. Setup MBUS. */
icl_mbus_init(dev_priv);
/* 7. Program arbiter BW_BUDDY registers */
if (DISPLAY_VER(dev_priv) >= 12)
tgl_bw_buddy_init(dev_priv);
/* 8. Ensure PHYs have completed calibration and adaptation */
if (IS_DG2(dev_priv))
intel_snps_phy_wait_for_calibration(dev_priv);
if (resume)
intel_dmc_load_program(dev_priv);
/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
if (DISPLAY_VER(dev_priv) >= 12) {
val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
}
/* Wa_14011503030:xelpd */
if (DISPLAY_VER(dev_priv) >= 13)
intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
}
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
if (!HAS_DISPLAY(dev_priv))
return;
gen9_disable_dc_states(dev_priv);
intel_dmc_disable_program(dev_priv);
/* 1. Disable all display engine functions -> already done */
/* 2. Disable DBUF */
gen9_dbuf_disable(dev_priv);
/* 3. Disable CD clock */
intel_cdclk_uninit_hw(dev_priv);
/*
* 4. Disable Power Well 1 (PG1).
* The AUX IO power wells are toggled on demand, so they are already
* disabled at this point.
*/
mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock);
/* 5. Uninitialize all combo phys */
intel_combo_phy_uninit(dev_priv);
}
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn_bc =
lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
struct i915_power_well *cmn_d =
lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
/*
* DISPLAY_PHY_CONTROL can get corrupted if read. As a
* workaround, never ever read DISPLAY_PHY_CONTROL, and
* instead maintain a shadow copy ourselves. Use the actual
* power well state and lane status to reconstruct the
* expected initial value.
*/
dev_priv->display.power.chv_phy_control =
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
/*
* If all lanes are disabled we leave the override disabled
* with all power down bits cleared to match the state we
* would use after disabling the port. Otherwise enable the
* override and set the lane powerdown bits according to the
* current lane status.
*/
if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
unsigned int mask;
mask = status & DPLL_PORTB_READY_MASK;
if (mask == 0xf)
mask = 0x0;
else
dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
mask = (status & DPLL_PORTC_READY_MASK) >> 4;
if (mask == 0xf)
mask = 0x0;
else
dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false;
} else {
dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true;
}
if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
unsigned int mask;
mask = status & DPLL_PORTD_READY_MASK;
if (mask == 0xf)
mask = 0x0;
else
dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false;
} else {
dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true;
}
drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
dev_priv->display.power.chv_phy_control);
/* Defer application of initial phy_control to enabling the powerwell */
}
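/*
 * Illustrative sketch of the shadow-copy rule above: any update must modify
 * chv_phy_control and write it out, never read the register back. The
 * helper below is hypothetical and only shows the pattern.
 *
 *	static void example_chv_update_phy_control(struct drm_i915_private *i915,
 *						   u32 clear, u32 set)
 *	{
 *		i915->display.power.chv_phy_control &= ~clear;
 *		i915->display.power.chv_phy_control |= set;
 *		intel_de_write(i915, DISPLAY_PHY_CONTROL,
 *			       i915->display.power.chv_phy_control);
 *	}
 */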
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn =
lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
struct i915_power_well *disp2d =
lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
/* If the display might already be active, skip this */
if (intel_power_well_is_enabled(dev_priv, cmn) &&
intel_power_well_is_enabled(dev_priv, disp2d) &&
intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
return;
drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
/* cmnlane needs DPLL registers */
intel_power_well_enable(dev_priv, disp2d);
/*
* From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
* Need to assert and de-assert PHY SB reset by gating the
* common lane power, then un-gating it.
* Simply ungating isn't enough to reset the PHY enough to get
* ports and lanes running.
*/
intel_power_well_disable(dev_priv, cmn);
}
static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
{
bool ret;
vlv_punit_get(dev_priv);
ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
vlv_punit_put(dev_priv);
return ret;
}
static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
drm_WARN(&dev_priv->drm,
!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
"VED not power gated\n");
}
static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{
static const struct pci_device_id isp_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
{}
};
drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
"ISP not power gated\n");
}
static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
/**
* intel_power_domains_init_hw - initialize hardware power domain state
* @i915: i915 device instance
* @resume: Called from resume code paths or not
*
* This function initializes the hardware power domain state and enables all
* power wells belonging to the INIT power domain. Power wells in other
* domains are referenced or disabled by
* intel_modeset_readout_hw_state(). After that the reference count of each
* power well must match its HW enabled state, see
* intel_power_domains_verify_state().
*
* It will return with power domains disabled (to be enabled later by
* intel_power_domains_enable()) and must be paired with
* intel_power_domains_driver_remove().
*/
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
struct i915_power_domains *power_domains = &i915->display.power.domains;
power_domains->initializing = true;
if (DISPLAY_VER(i915) >= 11) {
icl_display_core_init(i915, resume);
} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
bxt_display_core_init(i915, resume);
} else if (DISPLAY_VER(i915) == 9) {
skl_display_core_init(i915, resume);
} else if (IS_CHERRYVIEW(i915)) {
mutex_lock(&power_domains->lock);
chv_phy_control_init(i915);
mutex_unlock(&power_domains->lock);
assert_isp_power_gated(i915);
} else if (IS_VALLEYVIEW(i915)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(i915);
mutex_unlock(&power_domains->lock);
assert_ved_power_gated(i915);
assert_isp_power_gated(i915);
} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
hsw_assert_cdclk(i915);
intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
} else if (IS_IVYBRIDGE(i915)) {
intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
}
/*
* Keep all power wells enabled for any dependent HW access during
* initialization and to make sure we keep BIOS enabled display HW
* resources powered until display HW readout is complete. We drop
* this reference in intel_power_domains_enable().
*/
drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
/* Disable power well support if the user asked for it. */
if (!i915->params.disable_power_well) {
drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
POWER_DOMAIN_INIT);
}
intel_power_domains_sync_hw(i915);
power_domains->initializing = false;
}
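/*
 * Illustrative call ordering implied by the kernel-doc above (a sketch of
 * the load/remove flow, not the actual probe code):
 *
 *	intel_power_domains_init_hw(i915, false);
 *	... display HW readout takes its own power references ...
 *	intel_power_domains_enable(i915);
 *	... normal operation ...
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_driver_remove(i915);
 */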
/**
* intel_power_domains_driver_remove - deinitialize hw power domain state
* @i915: i915 device instance
*
* De-initializes the display power domain HW state. It also ensures that the
* device stays powered up so that the driver can be reloaded.
*
* It must be called with power domains already disabled (after a call to
* intel_power_domains_disable()) and must be paired with
* intel_power_domains_init_hw().
*/
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&i915->display.power.domains.init_wakeref);
/* Remove the refcount we took to keep power well support disabled. */
if (!i915->params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
fetch_and_zero(&i915->display.power.domains.disable_wakeref));
intel_display_power_flush_work_sync(i915);
intel_power_domains_verify_state(i915);
/* Keep the power well enabled, but cancel its rpm wakeref. */
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
/**
* intel_power_domains_sanitize_state - sanitize power domains state
* @i915: i915 device instance
*
* Sanitize the power domains state during driver loading and system resume.
* The function will disable all display power wells that the BIOS has enabled
* without a user for them (any user for a power well has taken a reference
* on it by the time this function is called, after the state of all the
* pipe, encoder, etc. HW resources have been sanitized).
*/
void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &i915->display.power.domains;
struct i915_power_well *power_well;
mutex_lock(&power_domains->lock);
for_each_power_well_reverse(i915, power_well) {
if (power_well->desc->always_on || power_well->count ||
!intel_power_well_is_enabled(i915, power_well))
continue;
drm_dbg_kms(&i915->drm,
"BIOS left unused %s power well enabled, disabling it\n",
intel_power_well_name(power_well));
intel_power_well_disable(i915, power_well);
}
mutex_unlock(&power_domains->lock);
}
/**
* intel_power_domains_enable - enable toggling of display power wells
* @i915: i915 device instance
*
* Enable the on-demand enabling/disabling of the display power wells. Note that
* power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
* only at specific points of the display modeset sequence, thus they are not
* affected by the intel_power_domains_enable()/disable() calls. The purpose
* of these functions is to keep the rest of the power wells enabled until the end
* of display HW readout (which will acquire the power references reflecting
* the current HW state).
*/
void intel_power_domains_enable(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&i915->display.power.domains.init_wakeref);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
intel_power_domains_verify_state(i915);
}
/**
* intel_power_domains_disable - disable toggling of display power wells
* @i915: i915 device instance
*
* Disable the on-demand enabling/disabling of the display power wells. See
* intel_power_domains_enable() for which power wells this call controls.
*/
void intel_power_domains_disable(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &i915->display.power.domains;
drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
intel_power_domains_verify_state(i915);
}
/**
* intel_power_domains_suspend - suspend power domain state
* @i915: i915 device instance
* @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
*
* This function prepares the hardware power domain state before entering
* system suspend.
*
* It must be called with power domains already disabled (after a call to
* intel_power_domains_disable()) and paired with intel_power_domains_resume().
*/
void intel_power_domains_suspend(struct drm_i915_private *i915,
enum i915_drm_suspend_mode suspend_mode)
{
struct i915_power_domains *power_domains = &i915->display.power.domains;
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&power_domains->init_wakeref);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
/*
* In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
* support, don't manually deinit the power domains. This also means the
* DMC firmware will stay active, it will power down any HW
* resources as required and also enable deeper system power states
* that would be blocked if the firmware was inactive.
*/
if (!(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
suspend_mode == I915_DRM_SUSPEND_IDLE &&
intel_dmc_has_payload(i915)) {
intel_display_power_flush_work(i915);
intel_power_domains_verify_state(i915);
return;
}
/*
* Even if power well support was disabled, we still want to disable
* power wells if power domains must be deinitialized for suspend.
*/
if (!i915->params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
fetch_and_zero(&i915->display.power.domains.disable_wakeref));
intel_display_power_flush_work(i915);
intel_power_domains_verify_state(i915);
if (DISPLAY_VER(i915) >= 11)
icl_display_core_uninit(i915);
else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
bxt_display_core_uninit(i915);
else if (DISPLAY_VER(i915) == 9)
skl_display_core_uninit(i915);
power_domains->display_core_suspended = true;
}
/**
* intel_power_domains_resume - resume power domain state
* @i915: i915 device instance
*
* This function resumes the hardware power domain state during system resume.
*
* It will return with power domain support disabled (to be enabled later by
* intel_power_domains_enable()) and must be paired with
* intel_power_domains_suspend().
*/
void intel_power_domains_resume(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &i915->display.power.domains;
if (power_domains->display_core_suspended) {
intel_power_domains_init_hw(i915, true);
power_domains->display_core_suspended = false;
} else {
drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
intel_power_domains_verify_state(i915);
}
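/*
 * Illustrative system sleep ordering implied by the pairing rules in the
 * kernel-doc comments above (a sketch, not the actual pm callbacks;
 * I915_DRM_SUSPEND_MEM stands in for whichever target state applies):
 *
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
 *	... system sleeps ...
 *	intel_power_domains_resume(i915);
 *	intel_power_domains_enable(i915);
 */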
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &i915->display.power.domains;
struct i915_power_well *power_well;
for_each_power_well(i915, power_well) {
enum intel_display_power_domain domain;
drm_dbg(&i915->drm, "%-25s %d\n",
intel_power_well_name(power_well), intel_power_well_refcount(power_well));
for_each_power_domain(domain, intel_power_well_domains(power_well))
drm_dbg(&i915->drm, " %-23s %d\n",
intel_display_power_domain_str(domain),
power_domains->domain_use_count[domain]);
}
}
/**
* intel_power_domains_verify_state - verify the HW/SW state for all power wells
* @i915: i915 device instance
*
* Verify if the reference count of each power well matches its HW enabled
* state and the total refcount of the domains it belongs to. This must be
* called after modeset HW state sanitization, which is responsible for
* acquiring reference counts for any power wells in use and disabling the
* ones left on by BIOS but not required by any active output.
*/
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &i915->display.power.domains;
struct i915_power_well *power_well;
bool dump_domain_info;
mutex_lock(&power_domains->lock);
verify_async_put_domains_state(power_domains);
dump_domain_info = false;
for_each_power_well(i915, power_well) {
enum intel_display_power_domain domain;
int domains_count;
bool enabled;
enabled = intel_power_well_is_enabled(i915, power_well);
if ((intel_power_well_refcount(power_well) ||
intel_power_well_is_always_on(power_well)) !=
enabled)
drm_err(&i915->drm,
"power well %s state mismatch (refcount %d/enabled %d)",
intel_power_well_name(power_well),
intel_power_well_refcount(power_well), enabled);
domains_count = 0;
for_each_power_domain(domain, intel_power_well_domains(power_well))
domains_count += power_domains->domain_use_count[domain];
if (intel_power_well_refcount(power_well) != domains_count) {
drm_err(&i915->drm,
"power well %s refcount/domain refcount mismatch "
"(refcount %d/domains refcount %d)\n",
intel_power_well_name(power_well),
intel_power_well_refcount(power_well),
domains_count);
dump_domain_info = true;
}
}
if (dump_domain_info) {
static bool dumped;
if (!dumped) {
intel_power_domains_dump_info(i915);
dumped = true;
}
}
mutex_unlock(&power_domains->lock);
}
#else
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}
#endif
void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
IS_BROXTON(i915)) {
bxt_enable_dc9(i915);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_enable_pc8(i915);
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}
void intel_display_power_resume_early(struct drm_i915_private *i915)
{
if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
IS_BROXTON(i915)) {
gen9_sanitize_dc_state(i915);
bxt_disable_dc9(i915);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_disable_pc8(i915);
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
}
void intel_display_power_suspend(struct drm_i915_private *i915)
{
if (DISPLAY_VER(i915) >= 11) {
icl_display_core_uninit(i915);
bxt_enable_dc9(i915);
} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
bxt_display_core_uninit(i915);
bxt_enable_dc9(i915);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_enable_pc8(i915);
}
}
void intel_display_power_resume(struct drm_i915_private *i915)
{
if (DISPLAY_VER(i915) >= 11) {
bxt_disable_dc9(i915);
icl_display_core_init(i915, true);
if (intel_dmc_has_payload(i915)) {
if (i915->display.dmc.allowed_dc_mask &
DC_STATE_EN_UPTO_DC6)
skl_enable_dc6(i915);
else if (i915->display.dmc.allowed_dc_mask &
DC_STATE_EN_UPTO_DC5)
gen9_enable_dc5(i915);
}
} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
bxt_disable_dc9(i915);
bxt_display_core_init(i915, true);
if (intel_dmc_has_payload(i915) &&
(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
gen9_enable_dc5(i915);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_disable_pc8(i915);
}
}
void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
{
struct i915_power_domains *power_domains = &i915->display.power.domains;
int i;
mutex_lock(&power_domains->lock);
seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
for (i = 0; i < power_domains->power_well_count; i++) {
struct i915_power_well *power_well;
enum intel_display_power_domain power_domain;
power_well = &power_domains->power_wells[i];
seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well),
intel_power_well_refcount(power_well));
for_each_power_domain(power_domain, intel_power_well_domains(power_well))
seq_printf(m, " %-23s %d\n",
intel_display_power_domain_str(power_domain),
power_domains->domain_use_count[power_domain]);
}
mutex_unlock(&power_domains->lock);
}
struct intel_ddi_port_domains {
enum port port_start;
enum port port_end;
enum aux_ch aux_ch_start;
enum aux_ch aux_ch_end;
enum intel_display_power_domain ddi_lanes;
enum intel_display_power_domain ddi_io;
enum intel_display_power_domain aux_io;
enum intel_display_power_domain aux_legacy_usbc;
enum intel_display_power_domain aux_tbt;
};
static const struct intel_ddi_port_domains
i9xx_port_domains[] = {
{
.port_start = PORT_A,
.port_end = PORT_F,
.aux_ch_start = AUX_CH_A,
.aux_ch_end = AUX_CH_F,
.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
.aux_io = POWER_DOMAIN_AUX_IO_A,
.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
.aux_tbt = POWER_DOMAIN_INVALID,
},
};
static const struct intel_ddi_port_domains
d11_port_domains[] = {
{
.port_start = PORT_A,
.port_end = PORT_B,
.aux_ch_start = AUX_CH_A,
.aux_ch_end = AUX_CH_B,
.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
.aux_io = POWER_DOMAIN_AUX_IO_A,
.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
.aux_tbt = POWER_DOMAIN_INVALID,
}, {
.port_start = PORT_C,
.port_end = PORT_F,
.aux_ch_start = AUX_CH_C,
.aux_ch_end = AUX_CH_F,
.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C,
.ddi_io = POWER_DOMAIN_PORT_DDI_IO_C,
.aux_io = POWER_DOMAIN_AUX_IO_C,
.aux_legacy_usbc = POWER_DOMAIN_AUX_C,
.aux_tbt = POWER_DOMAIN_AUX_TBT1,
},
};
static const struct intel_ddi_port_domains
d12_port_domains[] = {
{
.port_start = PORT_A,
.port_end = PORT_C,
.aux_ch_start = AUX_CH_A,
.aux_ch_end = AUX_CH_C,
.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
.aux_io = POWER_DOMAIN_AUX_IO_A,
.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
.aux_tbt = POWER_DOMAIN_INVALID,
}, {
.port_start = PORT_TC1,
.port_end = PORT_TC6,
.aux_ch_start = AUX_CH_USBC1,
.aux_ch_end = AUX_CH_USBC6,
.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
.aux_io = POWER_DOMAIN_INVALID,
.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
.aux_tbt = POWER_DOMAIN_AUX_TBT1,
},
};
static const struct intel_ddi_port_domains
d13_port_domains[] = {
{
.port_start = PORT_A,
.port_end = PORT_C,
.aux_ch_start = AUX_CH_A,
.aux_ch_end = AUX_CH_C,
.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
.aux_io = POWER_DOMAIN_AUX_IO_A,
.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
.aux_tbt = POWER_DOMAIN_INVALID,
}, {
.port_start = PORT_TC1,
.port_end = PORT_TC4,
.aux_ch_start = AUX_CH_USBC1,
.aux_ch_end = AUX_CH_USBC4,
.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
.aux_io = POWER_DOMAIN_INVALID,
.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
.aux_tbt = POWER_DOMAIN_AUX_TBT1,
}, {
.port_start = PORT_D_XELPD,
.port_end = PORT_E_XELPD,
.aux_ch_start = AUX_CH_D_XELPD,
.aux_ch_end = AUX_CH_E_XELPD,
.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D,
.ddi_io = POWER_DOMAIN_PORT_DDI_IO_D,
.aux_io = POWER_DOMAIN_AUX_IO_D,
.aux_legacy_usbc = POWER_DOMAIN_AUX_D,
.aux_tbt = POWER_DOMAIN_INVALID,
},
};
static void
intel_port_domains_for_platform(struct drm_i915_private *i915,
const struct intel_ddi_port_domains **domains,
int *domains_size)
{
if (DISPLAY_VER(i915) >= 13) {
*domains = d13_port_domains;
*domains_size = ARRAY_SIZE(d13_port_domains);
} else if (DISPLAY_VER(i915) >= 12) {
*domains = d12_port_domains;
*domains_size = ARRAY_SIZE(d12_port_domains);
} else if (DISPLAY_VER(i915) >= 11) {
*domains = d11_port_domains;
*domains_size = ARRAY_SIZE(d11_port_domains);
} else {
*domains = i9xx_port_domains;
*domains_size = ARRAY_SIZE(i9xx_port_domains);
}
}
static const struct intel_ddi_port_domains *
intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
{
const struct intel_ddi_port_domains *domains;
int domains_size;
int i;
intel_port_domains_for_platform(i915, &domains, &domains_size);
for (i = 0; i < domains_size; i++)
if (port >= domains[i].port_start && port <= domains[i].port_end)
return &domains[i];
return NULL;
}
enum intel_display_power_domain
intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
{
const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_PORT_DDI_IO_A;
return domains->ddi_io + (int)(port - domains->port_start);
}
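/*
 * Worked example (a sketch): on a display version 12 platform PORT_TC3 falls
 * in the second d12_port_domains range, so the arithmetic above yields
 * POWER_DOMAIN_PORT_DDI_IO_TC1 + (PORT_TC3 - PORT_TC1), i.e.
 * POWER_DOMAIN_PORT_DDI_IO_TC3. This relies on the port and power domain
 * enum values within a range being consecutive.
 */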
enum intel_display_power_domain
intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
{
const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_PORT_DDI_LANES_A;
return domains->ddi_lanes + (int)(port - domains->port_start);
}
static const struct intel_ddi_port_domains *
intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
const struct intel_ddi_port_domains *domains;
int domains_size;
int i;
intel_port_domains_for_platform(i915, &domains, &domains_size);
for (i = 0; i < domains_size; i++)
if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
return &domains[i];
return NULL;
}
enum intel_display_power_domain
intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
if (drm_WARN_ON(&i915->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_AUX_IO_A;
return domains->aux_io + (int)(aux_ch - domains->aux_ch_start);
}
enum intel_display_power_domain
intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_AUX_A;
return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
}
enum intel_display_power_domain
intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_AUX_TBT1;
return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
}