Merge tag 'drm-intel-next-2018-11-22' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Changes outside i915:
- Connector property to limit max bpc (Radhakrishna)
- Fix LPE audio runtime PM and deinit (Ville)
- DP FEC prep work (Anusha)
- Mark pinned shmemfs pages as unevictable (Kuo-Hsin)
- Backmerge drm-next (Jani)

Inside i915:
- Revert OA UAPI change that lacks userspace (Joonas)
- Register macro cleanup (Jani)
- 32-bit build fixes on pin flags (Chris)
- Fix MG DP mode and PHY gating for HDMI (Imre)
- DP MST race, hpd and irq fixes (Lyude)
- Combo PHY fixes and cleanup (Imre, Lucas)
- Move display init and cleanup under modeset init and cleanup (José)
- PSR fixes (José)
- Subslice size fixes (Daniele)
- Abstract and clean up fixed point helpers (Jani)
- Plane input CSC for YUV to RGB conversion (Uma)
- Break long iterations for get/put shmemfs pages (Chris)
- Improve DDI encoder hw state readout sanity checks (Imre)
- Fix power well leaks for MST (José)
- Scaler fixes (Ville)
- Watermark fixes (Ville)
- Fix VLV/CHV DSI panel orientation readout (Ville)
- ICL rawclock fixes (Paulo)
- Workaround DMC power well request issues (Imre)
- Plane allocation fix (Maarten)
- Transcoder enum value/ordering robustness fixes (Imre)
- UTS_RELEASE build dependency fix (Hans Holmberg)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87k1l4cesj.fsf@intel.com
Dave Airlie 2018-11-29 09:50:33 +10:00
commit bfeb122d30
142 changed files with 9690 additions and 5455 deletions
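The "max bpc" connector property added in this merge (see the drm_connector.c and drm_atomic_uapi.c hunks below) is driven entirely from userspace. A rough libdrm sketch of setting it (the helper names and the chosen limit of 8 bpc are illustrative, not part of this series):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Look up a connector property by name and return its id, or 0 if absent. */
static uint32_t find_connector_prop(int fd, uint32_t connector_id, const char *name)
{
	drmModeObjectProperties *props =
		drmModeObjectGetProperties(fd, connector_id, DRM_MODE_OBJECT_CONNECTOR);
	uint32_t prop_id = 0;

	if (!props)
		return 0;

	for (uint32_t i = 0; i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (prop && strcmp(prop->name, name) == 0)
			prop_id = prop->prop_id;
		drmModeFreeProperty(prop);
		if (prop_id)
			break;
	}
	drmModeFreeObjectProperties(props);
	return prop_id;
}

/* Ask the driver to clamp this connector to at most 8 bits per component. */
static int limit_bpc_to_8(int fd, uint32_t connector_id)
{
	uint32_t prop_id = find_connector_prop(fd, connector_id, "max bpc");

	if (!prop_id)
		return -1; /* property not exposed by this driver/connector */

	return drmModeObjectSetProperty(fd, connector_id,
					DRM_MODE_OBJECT_CONNECTOR, prop_id, 8);
}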


@ -143,7 +143,7 @@ using a number of wrapper functions:
Query the address space, and return true if it is completely
unevictable.
These are currently used in two places in the kernel:
These are currently used in three places in the kernel:
(1) By ramfs to mark the address spaces of its inodes when they are created,
and this mark remains for the life of the inode.
@ -154,6 +154,10 @@ These are currently used in two places in the kernel:
swapped out; the application must touch the pages manually if it wants to
ensure they're in memory.
(3) By the i915 driver to mark pinned address space until it's unpinned. The
amount of unevictable memory marked by i915 driver is roughly the bounded
object size in debugfs/dri/0/i915_gem_objects.
Detecting Unevictable Pages
---------------------------
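For reference, the i915 usage documented here follows the pattern below. This is a condensed sketch of the i915_gem.c hunks further down in this diff, not standalone code: the shmemfs mapping is marked unevictable while the object's pages are pinned, and on release the flag is cleared and check_move_unevictable_pages() moves the pages back to the right LRU in pagevec-sized batches.

/* Pinning: keep the object's shmemfs pages off the evictable LRUs. */
mapping_set_unevictable(obj->base.filp->f_mapping);

/* Releasing: clear the flag, then rescue and drop the pages in batches. */
mapping_clear_unevictable(obj->base.filp->f_mapping);

pagevec_init(&pvec);
for_each_sgt_page(page, sgt_iter, pages) {
	if (!pagevec_add(&pvec, page)) {
		check_move_unevictable_pages(&pvec); /* move back to the right LRU */
		__pagevec_release(&pvec);            /* drop the page references */
	}
}
if (pagevec_count(&pvec)) {
	check_move_unevictable_pages(&pvec);
	__pagevec_release(&pvec);
}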


@ -398,6 +398,11 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
{
struct drm_crtc_state *crtc_state;
struct drm_writeback_job *writeback_job = state->writeback_job;
const struct drm_display_info *info = &connector->display_info;
state->max_bpc = info->bpc ? info->bpc : 8;
if (connector->max_bpc_property)
state->max_bpc = min(state->max_bpc, state->max_requested_bpc);
if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
return 0;


@ -669,6 +669,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
if (old_connector_state->link_status !=
new_connector_state->link_status)
new_crtc_state->connectors_changed = true;
if (old_connector_state->max_requested_bpc !=
new_connector_state->max_requested_bpc)
new_crtc_state->connectors_changed = true;
}
if (funcs->atomic_check)


@ -740,6 +740,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
return set_out_fence_for_connector(state->state, connector,
fence_ptr);
} else if (property == connector->max_bpc_property) {
state->max_requested_bpc = val;
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
@ -804,6 +806,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = 0;
} else if (property == config->writeback_out_fence_ptr_property) {
*val = 0;
} else if (property == connector->max_bpc_property) {
*val = state->max_requested_bpc;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);


@ -932,6 +932,13 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
* is no longer protected and userspace should take appropriate action
* (whatever that might be).
*
* max bpc:
* This range property is used by userspace to limit the bit depth. When
* used the driver would limit the bpc in accordance with the valid range
* supported by the hardware and sink. Drivers to use the function
* drm_connector_attach_max_bpc_property() to create and attach the
* property to the connector during initialization.
*
* Connectors also have one standardized atomic property:
*
* CRTC_ID:
@ -1599,6 +1606,40 @@ void drm_connector_set_link_status_property(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_connector_set_link_status_property);
/**
* drm_connector_attach_max_bpc_property - attach "max bpc" property
* @connector: connector to attach max bpc property on.
* @min: The minimum bit depth supported by the connector.
* @max: The maximum bit depth supported by the connector.
*
* This is used to add support for limiting the bit depth on a connector.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
int min, int max)
{
struct drm_device *dev = connector->dev;
struct drm_property *prop;
prop = connector->max_bpc_property;
if (!prop) {
prop = drm_property_create_range(dev, 0, "max bpc", min, max);
if (!prop)
return -ENOMEM;
connector->max_bpc_property = prop;
}
drm_object_attach_property(&connector->base, prop, max);
connector->state->max_requested_bpc = max;
connector->state->max_bpc = max;
return 0;
}
EXPORT_SYMBOL(drm_connector_attach_max_bpc_property);
/**
* drm_connector_init_panel_orientation_property -
* initialize the connecters panel_orientation property
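As the kernel-doc above says, a driver opts in by calling the new helper while initializing the connector. A minimal sketch, assuming a hypothetical driver whose sinks support 6 to 12 bits per component (the function name and range are illustrative, not taken from this diff):

/* Hypothetical connector init path in a driver that supports deep color. */
static int example_connector_init(struct drm_connector *connector)
{
	int ret;

	/* Let userspace clamp this connector to 6..12 bits per component. */
	ret = drm_connector_attach_max_bpc_property(connector, 6, 12);
	if (ret)
		return ret;

	/*
	 * The atomic core now records the request in
	 * connector->state->max_requested_bpc and clamps it against the sink's
	 * reported depth into connector->state->max_bpc (see
	 * drm_atomic_connector_check() above); the driver reads max_bpc when
	 * picking the pipe's output depth.
	 */
	return 0;
}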


@ -1352,3 +1352,93 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
return 0;
}
EXPORT_SYMBOL(drm_dp_read_desc);
/**
* DRM DP Helpers for DSC
*/
u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
bool is_edp)
{
u8 slice_cap1 = dsc_dpcd[DP_DSC_SLICE_CAP_1 - DP_DSC_SUPPORT];
if (is_edp) {
/* For eDP, register DSC_SLICE_CAPABILITIES_1 gives slice count */
if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
return 4;
if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
return 2;
if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
return 1;
} else {
/* For DP, use values from DSC_SLICE_CAP_1 and DSC_SLICE_CAP2 */
u8 slice_cap2 = dsc_dpcd[DP_DSC_SLICE_CAP_2 - DP_DSC_SUPPORT];
if (slice_cap2 & DP_DSC_24_PER_DP_DSC_SINK)
return 24;
if (slice_cap2 & DP_DSC_20_PER_DP_DSC_SINK)
return 20;
if (slice_cap2 & DP_DSC_16_PER_DP_DSC_SINK)
return 16;
if (slice_cap1 & DP_DSC_12_PER_DP_DSC_SINK)
return 12;
if (slice_cap1 & DP_DSC_10_PER_DP_DSC_SINK)
return 10;
if (slice_cap1 & DP_DSC_8_PER_DP_DSC_SINK)
return 8;
if (slice_cap1 & DP_DSC_6_PER_DP_DSC_SINK)
return 6;
if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
return 4;
if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
return 2;
if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
return 1;
}
return 0;
}
EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_count);
u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
u8 line_buf_depth = dsc_dpcd[DP_DSC_LINE_BUF_BIT_DEPTH - DP_DSC_SUPPORT];
switch (line_buf_depth & DP_DSC_LINE_BUF_BIT_DEPTH_MASK) {
case DP_DSC_LINE_BUF_BIT_DEPTH_9:
return 9;
case DP_DSC_LINE_BUF_BIT_DEPTH_10:
return 10;
case DP_DSC_LINE_BUF_BIT_DEPTH_11:
return 11;
case DP_DSC_LINE_BUF_BIT_DEPTH_12:
return 12;
case DP_DSC_LINE_BUF_BIT_DEPTH_13:
return 13;
case DP_DSC_LINE_BUF_BIT_DEPTH_14:
return 14;
case DP_DSC_LINE_BUF_BIT_DEPTH_15:
return 15;
case DP_DSC_LINE_BUF_BIT_DEPTH_16:
return 16;
case DP_DSC_LINE_BUF_BIT_DEPTH_8:
return 8;
}
return 0;
}
EXPORT_SYMBOL(drm_dp_dsc_sink_line_buf_depth);
u8 drm_dp_dsc_sink_max_color_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
u8 color_depth = dsc_dpcd[DP_DSC_DEC_COLOR_DEPTH_CAP - DP_DSC_SUPPORT];
if (color_depth & DP_DSC_12_BPC)
return 12;
if (color_depth & DP_DSC_10_BPC)
return 10;
if (color_depth & DP_DSC_8_BPC)
return 8;
return 0;
}
EXPORT_SYMBOL(drm_dp_dsc_sink_max_color_depth);
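A rough sketch of how a DP encoder driver might consume these helpers after reading the sink's DSC capability block over AUX (the function is illustrative; the DPCD offsets and helpers are the ones defined above and in drm_dp_helper.h):

#include <drm/drm_dp_helper.h>

/* Illustrative only: query DSC limits from the sink's capability registers. */
static void example_query_dsc_caps(struct drm_dp_aux *aux, bool is_edp)
{
	u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];

	if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
			     sizeof(dsc_dpcd)) != sizeof(dsc_dpcd))
		return;

	if (dsc_dpcd[0] & DP_DSC_DECOMPRESSION_IS_SUPPORTED) {
		u8 slices = drm_dp_dsc_sink_max_slice_count(dsc_dpcd, is_edp);
		u8 buf_bits = drm_dp_dsc_sink_line_buf_depth(dsc_dpcd);
		u8 max_bpc = drm_dp_dsc_sink_max_color_depth(dsc_dpcd);

		DRM_DEBUG_KMS("DSC: %u slices, %u line buffer bits, up to %u bpc\n",
			      slices, buf_bits, max_bpc);
	}
}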


@ -75,6 +75,7 @@ i915-y += i915_cmd_parser.o \
i915_gemfs.o \
i915_query.o \
i915_request.o \
i915_scheduler.o \
i915_timeline.o \
i915_trace_points.o \
i915_vma.o \
@ -112,6 +113,8 @@ i915-y += intel_audio.o \
intel_bios.o \
intel_cdclk.o \
intel_color.o \
intel_combo_phy.o \
intel_connector.o \
intel_display.o \
intel_dpio_phy.o \
intel_dpll_mgr.o \
@ -120,9 +123,9 @@ i915-y += intel_audio.o \
intel_frontbuffer.o \
intel_hdcp.o \
intel_hotplug.o \
intel_modes.o \
intel_overlay.o \
intel_psr.o \
intel_quirks.o \
intel_sideband.o \
intel_sprite.o
i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
@ -142,6 +145,7 @@ i915-y += dvo_ch7017.o \
intel_dp_link_training.o \
intel_dp_mst.o \
intel_dp.o \
intel_dsi.o \
intel_dsi_dcs_backlight.o \
intel_dsi_vbt.o \
intel_dvo.o \


@ -334,6 +334,28 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}
static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
struct i915_gem_context *ctx)
{
struct intel_vgpu_mm *mm = workload->shadow_mm;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
int i = 0;
if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
return -1;
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
} else {
for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
px_dma(ppgtt->pdp.page_directory[i]) =
mm->ppgtt_mm.shadow_pdps[i];
}
}
return 0;
}
/**
* intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
* shadow it as well, include ringbuffer,wa_ctx and ctx.
@ -358,6 +380,12 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (workload->req)
return 0;
ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
if (ret < 0) {
gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
return ret;
}
/* pin shadow context by gvt even the shadow context will be pinned
* when i915 alloc request. That is because gvt will update the guest
* context from shadow context when workload is completed, and at that


@ -1788,6 +1788,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
if (!IS_GEN5(dev_priv))
return -ENODEV;
intel_runtime_pm_get(dev_priv);
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
@ -1802,6 +1804,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
seq_printf(m, "GFX power: %ld\n", gfx);
seq_printf(m, "Total power: %ld\n", chipset + gfx);
intel_runtime_pm_put(dev_priv);
return 0;
}
@ -2215,8 +2219,23 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 act_freq = rps->cur_freq;
struct drm_file *file;
if (intel_runtime_pm_get_if_in_use(dev_priv)) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
act_freq = vlv_punit_read(dev_priv,
PUNIT_REG_GPU_FREQ_STS);
act_freq = (act_freq >> 8) & 0xff;
mutex_unlock(&dev_priv->pcu_lock);
} else {
act_freq = intel_get_cagf(dev_priv,
I915_READ(GEN6_RPSTAT1));
}
intel_runtime_pm_put(dev_priv);
}
seq_printf(m, "RPS enabled? %d\n", rps->enabled);
seq_printf(m, "GPU busy? %s [%d requests]\n",
yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
@ -2224,8 +2243,9 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
seq_printf(m, "Frequency requested %d\n",
intel_gpu_freq(dev_priv, rps->cur_freq));
seq_printf(m, "Frequency requested %d, actual %d\n",
intel_gpu_freq(dev_priv, rps->cur_freq),
intel_gpu_freq(dev_priv, act_freq));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
intel_gpu_freq(dev_priv, rps->min_freq),
intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
@ -2900,16 +2920,15 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
if (IS_KABYLAKE(dev_priv) ||
(IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
seq_printf(m, "DC3 -> DC5 count: %d\n",
I915_READ(SKL_CSR_DC3_DC5_COUNT));
if (WARN_ON(INTEL_GEN(dev_priv) > 11))
goto out;
seq_printf(m, "DC3 -> DC5 count: %d\n",
I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
SKL_CSR_DC3_DC5_COUNT));
if (!IS_GEN9_LP(dev_priv))
seq_printf(m, "DC5 -> DC6 count: %d\n",
I915_READ(SKL_CSR_DC5_DC6_COUNT));
} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
seq_printf(m, "DC3 -> DC5 count: %d\n",
I915_READ(BXT_CSR_DC3_DC5_COUNT));
}
out:
seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
@ -3049,16 +3068,17 @@ static void intel_connector_info(struct seq_file *m,
seq_printf(m, "connector %d: type %s, status: %s\n",
connector->base.id, connector->name,
drm_get_connector_status_name(connector->status));
if (connector->status == connector_status_connected) {
seq_printf(m, "\tname: %s\n", connector->display_info.name);
seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
connector->display_info.width_mm,
connector->display_info.height_mm);
seq_printf(m, "\tsubpixel order: %s\n",
drm_get_subpixel_order_name(connector->display_info.subpixel_order));
seq_printf(m, "\tCEA rev: %d\n",
connector->display_info.cea_rev);
}
if (connector->status == connector_status_disconnected)
return;
seq_printf(m, "\tname: %s\n", connector->display_info.name);
seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
connector->display_info.width_mm,
connector->display_info.height_mm);
seq_printf(m, "\tsubpixel order: %s\n",
drm_get_subpixel_order_name(connector->display_info.subpixel_order));
seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
if (!intel_encoder)
return;
@ -4172,6 +4192,7 @@ i915_drop_caches_set(void *data, u64 val)
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
intel_runtime_pm_get(i915);
if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
i915_gem_set_wedged(i915);
@ -4181,7 +4202,7 @@ i915_drop_caches_set(void *data, u64 val)
if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
goto out;
if (val & DROP_ACTIVE)
ret = i915_gem_wait_for_idle(i915,
@ -4189,11 +4210,8 @@ i915_drop_caches_set(void *data, u64 val)
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (ret == 0 && val & DROP_RESET_SEQNO) {
intel_runtime_pm_get(i915);
if (ret == 0 && val & DROP_RESET_SEQNO)
ret = i915_gem_set_global_seqno(&i915->drm, 1);
intel_runtime_pm_put(i915);
}
if (val & DROP_RETIRE)
i915_retire_requests(i915);
@ -4231,6 +4249,9 @@ i915_drop_caches_set(void *data, u64 val)
if (val & DROP_FREED)
i915_gem_drain_freed_objects(i915);
out:
intel_runtime_pm_put(i915);
return ret;
}
@ -4331,7 +4352,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
for (s = 0; s < info->sseu.max_slices; s++) {
/*
* FIXME: Valid SS Mask respects the spec and read
* only valid bits for those registers, excluding reserverd
* only valid bits for those registers, excluding reserved
* although this seems wrong because it would leave many
* subslices without ACK.
*/
@ -4641,24 +4662,122 @@ static const struct file_operations i915_hpd_storm_ctl_fops = {
.write = i915_hpd_storm_ctl_write
};
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
seq_printf(m, "Enabled: %s\n",
yesno(dev_priv->hotplug.hpd_short_storm_enabled));
return 0;
}
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
return single_open(file, i915_hpd_short_storm_ctl_show,
inode->i_private);
}
static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
struct i915_hotplug *hotplug = &dev_priv->hotplug;
char *newline;
char tmp[16];
int i;
bool new_state;
if (len >= sizeof(tmp))
return -EINVAL;
if (copy_from_user(tmp, ubuf, len))
return -EFAULT;
tmp[len] = '\0';
/* Strip newline, if any */
newline = strchr(tmp, '\n');
if (newline)
*newline = '\0';
/* Reset to the "default" state for this system */
if (strcmp(tmp, "reset") == 0)
new_state = !HAS_DP_MST(dev_priv);
else if (kstrtobool(tmp, &new_state) != 0)
return -EINVAL;
DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
new_state ? "En" : "Dis");
spin_lock_irq(&dev_priv->irq_lock);
hotplug->hpd_short_storm_enabled = new_state;
/* Reset the HPD storm stats so we don't accidentally trigger a storm */
for_each_hpd_pin(i)
hotplug->stats[i].count = 0;
spin_unlock_irq(&dev_priv->irq_lock);
/* Re-enable hpd immediately if we were in an irq storm */
flush_delayed_work(&dev_priv->hotplug.reenable_work);
return len;
}
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
.owner = THIS_MODULE,
.open = i915_hpd_short_storm_ctl_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = i915_hpd_short_storm_ctl_write,
};
static int i915_drrs_ctl_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *intel_crtc;
struct intel_encoder *encoder;
struct intel_dp *intel_dp;
struct intel_crtc *crtc;
if (INTEL_GEN(dev_priv) < 7)
return -ENODEV;
drm_modeset_lock_all(dev);
for_each_intel_crtc(dev, intel_crtc) {
if (!intel_crtc->base.state->active ||
!intel_crtc->config->has_drrs)
continue;
for_each_intel_crtc(dev, crtc) {
struct drm_connector_list_iter conn_iter;
struct intel_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_crtc_commit *commit;
int ret;
for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
if (ret)
return ret;
crtc_state = to_intel_crtc_state(crtc->base.state);
if (!crtc_state->base.active ||
!crtc_state->has_drrs)
goto out;
commit = crtc_state->base.commit;
if (commit) {
ret = wait_for_completion_interruptible(&commit->hw_done);
if (ret)
goto out;
}
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct intel_encoder *encoder;
struct intel_dp *intel_dp;
if (!(crtc_state->base.connector_mask &
drm_connector_mask(connector)))
continue;
encoder = intel_attached_encoder(connector);
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
@ -4668,13 +4787,18 @@ static int i915_drrs_ctl_set(void *data, u64 val)
intel_dp = enc_to_intel_dp(&encoder->base);
if (val)
intel_edp_drrs_enable(intel_dp,
intel_crtc->config);
crtc_state);
else
intel_edp_drrs_disable(intel_dp,
intel_crtc->config);
crtc_state);
}
drm_connector_list_iter_end(&conn_iter);
out:
drm_modeset_unlock(&crtc->base.mutex);
if (ret)
return ret;
}
drm_modeset_unlock_all(dev);
return 0;
}
@ -4818,6 +4942,7 @@ static const struct i915_debugfs_files {
{"i915_guc_log_level", &i915_guc_log_level_fops},
{"i915_guc_log_relay", &i915_guc_log_relay_fops},
{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
{"i915_ipc_status", &i915_ipc_status_fops},
{"i915_drrs_ctl", &i915_drrs_ctl_fops},
{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
@ -4899,13 +5024,10 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
continue;
err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
if (err <= 0) {
DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
size, b->offset, err);
continue;
}
seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
if (err < 0)
seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
else
seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
}
return 0;
@ -4934,6 +5056,28 @@ static int i915_panel_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct intel_connector *intel_connector = to_intel_connector(connector);
if (connector->status != connector_status_connected)
return -ENODEV;
/* HDCP is supported by connector */
if (!intel_connector->hdcp.shim)
return -EINVAL;
seq_printf(m, "%s:%d HDCP version: ", connector->name,
connector->base.id);
seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
"None" : "HDCP1.4");
seq_puts(m, "\n");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
/**
* i915_debugfs_connector_add - add i915 specific connector debugfs files
* @connector: pointer to a registered drm_connector
@ -4963,5 +5107,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
connector, &i915_psr_sink_status_fops);
}
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
connector, &i915_hdcp_sink_capability_fops);
}
return 0;
}


@ -345,7 +345,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = HAS_WT(dev_priv);
break;
case I915_PARAM_HAS_ALIASING_PPGTT:
value = USES_PPGTT(dev_priv);
value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
break;
case I915_PARAM_HAS_SEMAPHORES:
value = HAS_LEGACY_SEMAPHORES(dev_priv);
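For completeness, userspace reads this parameter through the long-standing getparam ioctl; the change above only alters what the kernel reports (the PPGTT level derived from device info, capped at "full"). A hedged sketch using only existing libdrm/i915 uAPI:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Illustrative only: query the PPGTT level the kernel reports for this GPU. */
static int query_ppgtt_level(int fd)
{
	struct drm_i915_getparam gp;
	int value = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_HAS_ALIASING_PPGTT;
	gp.value = &value;

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -errno;

	printf("PPGTT level: %d\n", value);
	return value;
}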
@ -645,6 +645,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (i915_inject_load_failure())
return -ENODEV;
if (INTEL_INFO(dev_priv)->num_pipes) {
ret = drm_vblank_init(&dev_priv->drm,
INTEL_INFO(dev_priv)->num_pipes);
if (ret)
goto out;
}
intel_bios_init(dev_priv);
/* If we have > 1 VGA cards, then we need to arbitrate access
@ -687,7 +694,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_modeset;
intel_setup_overlay(dev_priv);
intel_overlay_setup(dev_priv);
if (INTEL_INFO(dev_priv)->num_pipes == 0)
return 0;
@ -699,6 +706,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(dev_priv);
intel_init_ipc(dev_priv);
return 0;
cleanup_gem:
@ -1030,6 +1039,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
err_uncore:
intel_uncore_fini(dev_priv);
i915_mmio_cleanup(dev_priv);
err_bridge:
pci_dev_put(dev_priv->bridge_dev);
@ -1049,17 +1059,6 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
/*
* i915.enable_ppgtt is read-only, so do an early pass to validate the
* user's requested state against the hardware/driver capabilities. We
* do this now so that we can print out any log messages once rather
* than every time we check intel_enable_ppgtt().
*/
i915_modparams.enable_ppgtt =
intel_sanitize_enable_ppgtt(dev_priv,
i915_modparams.enable_ppgtt);
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
intel_gvt_sanitize_options(dev_priv);
}
@ -1340,7 +1339,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
/* Need to calculate bandwidth only for Gen9 */
if (IS_BROXTON(dev_priv))
ret = bxt_get_dram_info(dev_priv);
else if (INTEL_GEN(dev_priv) == 9)
else if (IS_GEN9(dev_priv))
ret = skl_get_dram_info(dev_priv);
else
ret = skl_dram_get_channels_info(dev_priv);
@ -1375,6 +1374,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
if (HAS_PPGTT(dev_priv)) {
if (intel_vgpu_active(dev_priv) &&
!intel_vgpu_has_full_48bit_ppgtt(dev_priv)) {
i915_report_error(dev_priv,
"incompatible vGPU found, support for isolated ppGTT required\n");
return -ENXIO;
}
}
intel_sanitize_options(dev_priv);
i915_perf_init(dev_priv);
@ -1630,14 +1638,16 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
(struct intel_device_info *)ent->driver_data;
struct intel_device_info *device_info;
struct drm_i915_private *i915;
int err;
i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
if (!i915)
return NULL;
return ERR_PTR(-ENOMEM);
if (drm_dev_init(&i915->drm, &driver, &pdev->dev)) {
err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
if (err) {
kfree(i915);
return NULL;
return ERR_PTR(err);
}
i915->drm.pdev = pdev;
@ -1650,8 +1660,8 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
device_info->device_id = pdev->device;
BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
sizeof(device_info->platform_mask) * BITS_PER_BYTE);
BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
BITS_PER_TYPE(device_info->platform_mask));
BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
return i915;
}
@ -1686,8 +1696,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
dev_priv = i915_driver_create(pdev, ent);
if (!dev_priv)
return -ENOMEM;
if (IS_ERR(dev_priv))
return PTR_ERR(dev_priv);
/* Disable nuclear pageflip by default on pre-ILK */
if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
@ -1711,26 +1721,12 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto out_cleanup_mmio;
/*
* TODO: move the vblank init and parts of modeset init steps into one
* of the i915_driver_init_/i915_driver_register functions according
* to the role/effect of the given init step.
*/
if (INTEL_INFO(dev_priv)->num_pipes) {
ret = drm_vblank_init(&dev_priv->drm,
INTEL_INFO(dev_priv)->num_pipes);
if (ret)
goto out_cleanup_hw;
}
ret = i915_load_modeset_init(&dev_priv->drm);
if (ret < 0)
goto out_cleanup_hw;
i915_driver_register(dev_priv);
intel_init_ipc(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
i915_welcome_messages(dev_priv);
@ -1782,7 +1778,6 @@ void i915_driver_unload(struct drm_device *dev)
i915_reset_error_state(dev_priv);
i915_gem_fini(dev_priv);
intel_fbc_cleanup_cfb(dev_priv);
intel_power_domains_fini_hw(dev_priv);
@ -1920,9 +1915,7 @@ static int i915_drm_suspend(struct drm_device *dev)
i915_save_state(dev_priv);
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_notify_adapter(dev_priv, opregion_target_state);
intel_opregion_unregister(dev_priv);
intel_opregion_suspend(dev_priv, opregion_target_state);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@ -1963,7 +1956,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
get_suspend_mode(dev_priv, hibernation));
ret = 0;
if (IS_GEN9_LP(dev_priv))
if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
bxt_enable_dc9(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_enable_pc8(dev_priv);
@ -2041,7 +2034,6 @@ static int i915_drm_resume(struct drm_device *dev)
i915_restore_state(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
intel_opregion_setup(dev_priv);
intel_init_pch_refclk(dev_priv);
@ -2083,12 +2075,10 @@ static int i915_drm_resume(struct drm_device *dev)
* */
intel_hpd_init(dev_priv);
intel_opregion_register(dev_priv);
intel_opregion_resume(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
intel_opregion_notify_adapter(dev_priv, PCI_D0);
intel_power_domains_enable(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
@ -2156,7 +2146,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_resume_early(dev_priv);
if (IS_GEN9_LP(dev_priv)) {
if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
gen9_sanitize_dc_state(dev_priv);
bxt_disable_dc9(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@ -2923,7 +2913,10 @@ static int intel_runtime_suspend(struct device *kdev)
intel_uncore_suspend(dev_priv);
ret = 0;
if (IS_GEN9_LP(dev_priv)) {
if (INTEL_GEN(dev_priv) >= 11) {
icl_display_core_uninit(dev_priv);
bxt_enable_dc9(dev_priv);
} else if (IS_GEN9_LP(dev_priv)) {
bxt_display_core_uninit(dev_priv);
bxt_enable_dc9(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@ -3008,7 +3001,18 @@ static int intel_runtime_resume(struct device *kdev)
if (intel_uncore_unclaimed_mmio(dev_priv))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
if (IS_GEN9_LP(dev_priv)) {
if (INTEL_GEN(dev_priv) >= 11) {
bxt_disable_dc9(dev_priv);
icl_display_core_init(dev_priv, true);
if (dev_priv->csr.dmc_payload) {
if (dev_priv->csr.allowed_dc_mask &
DC_STATE_EN_UPTO_DC6)
skl_enable_dc6(dev_priv);
else if (dev_priv->csr.allowed_dc_mask &
DC_STATE_EN_UPTO_DC5)
gen9_enable_dc5(dev_priv);
}
} else if (IS_GEN9_LP(dev_priv)) {
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);
if (dev_priv->csr.dmc_payload &&


@ -54,6 +54,7 @@
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
#include "i915_fixed.h"
#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"
@ -87,8 +88,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20180921"
#define DRIVER_TIMESTAMP 1537521997
#define DRIVER_DATE "20181122"
#define DRIVER_TIMESTAMP 1542898187
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@ -127,144 +128,6 @@ bool i915_error_injected(void);
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
typedef struct {
uint32_t val;
} uint_fixed_16_16_t;
#define FP_16_16_MAX ({ \
uint_fixed_16_16_t fp; \
fp.val = UINT_MAX; \
fp; \
})
static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
{
if (val.val == 0)
return true;
return false;
}
static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
{
uint_fixed_16_16_t fp;
WARN_ON(val > U16_MAX);
fp.val = val << 16;
return fp;
}
static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
{
return DIV_ROUND_UP(fp.val, 1 << 16);
}
static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
{
return fp.val >> 16;
}
static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
uint_fixed_16_16_t min2)
{
uint_fixed_16_16_t min;
min.val = min(min1.val, min2.val);
return min;
}
static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
uint_fixed_16_16_t max2)
{
uint_fixed_16_16_t max;
max.val = max(max1.val, max2.val);
return max;
}
static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
{
uint_fixed_16_16_t fp;
WARN_ON(val > U32_MAX);
fp.val = (uint32_t) val;
return fp;
}
static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
uint_fixed_16_16_t d)
{
return DIV_ROUND_UP(val.val, d.val);
}
static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
uint_fixed_16_16_t mul)
{
uint64_t intermediate_val;
intermediate_val = (uint64_t) val * mul.val;
intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
WARN_ON(intermediate_val > U32_MAX);
return (uint32_t) intermediate_val;
}
static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
uint_fixed_16_16_t mul)
{
uint64_t intermediate_val;
intermediate_val = (uint64_t) val.val * mul.val;
intermediate_val = intermediate_val >> 16;
return clamp_u64_to_fixed16(intermediate_val);
}
static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
{
uint64_t interm_val;
interm_val = (uint64_t)val << 16;
interm_val = DIV_ROUND_UP_ULL(interm_val, d);
return clamp_u64_to_fixed16(interm_val);
}
static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
uint_fixed_16_16_t d)
{
uint64_t interm_val;
interm_val = (uint64_t)val << 16;
interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
WARN_ON(interm_val > U32_MAX);
return (uint32_t) interm_val;
}
static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
uint_fixed_16_16_t mul)
{
uint64_t intermediate_val;
intermediate_val = (uint64_t) val * mul.val;
return clamp_u64_to_fixed16(intermediate_val);
}
static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
uint_fixed_16_16_t add2)
{
uint64_t interm_sum;
interm_sum = (uint64_t) add1.val + add2.val;
return clamp_u64_to_fixed16(interm_sum);
}
static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
uint32_t add2)
{
uint64_t interm_sum;
uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);
interm_sum = (uint64_t) add1.val + interm_add2.val;
return clamp_u64_to_fixed16(interm_sum);
}
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
@ -283,7 +146,8 @@ enum hpd_pin {
#define for_each_hpd_pin(__pin) \
for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
#define HPD_STORM_DEFAULT_THRESHOLD 5
/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50
struct i915_hotplug {
struct work_struct hotplug_work;
@ -308,6 +172,8 @@ struct i915_hotplug {
bool poll_enabled;
unsigned int hpd_storm_threshold;
/* Whether or not to count short HPD IRQs in HPD storms */
u8 hpd_short_storm_enabled;
/*
* if we get a HPD irq from DP and a HPD irq from non-DP
@ -465,8 +331,10 @@ struct drm_i915_display_funcs {
struct intel_csr {
struct work_struct work;
const char *fw_path;
uint32_t required_version;
uint32_t max_fw_size; /* bytes */
uint32_t *dmc_payload;
uint32_t dmc_fw_size;
uint32_t dmc_fw_size; /* dwords */
uint32_t version;
uint32_t mmio_count;
i915_reg_t mmioaddr[8];
@ -546,6 +414,8 @@ struct intel_fbc {
int adjusted_y;
int y;
uint16_t pixel_blend_mode;
} plane;
struct {
@ -630,7 +500,6 @@ struct i915_psr {
bool sink_psr2_support;
bool link_standby;
bool colorimetry_support;
bool alpm;
bool psr2_enabled;
u8 sink_sync_latency;
ktime_t last_entry_attempt;
@ -918,6 +787,11 @@ struct i915_power_well_desc {
/* The pw is backing the VGA functionality */
bool has_vga:1;
bool has_fuses:1;
/*
* The pw is for an ICL+ TypeC PHY port in
* Thunderbolt mode.
*/
bool is_tc_tbt:1;
} hsw;
};
const struct i915_power_well_ops *ops;
@ -1042,17 +916,6 @@ struct i915_gem_mm {
#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30
#define DP_AUX_E 0x50
#define DP_AUX_F 0x60
#define DDC_PIN_B 0x05
#define DDC_PIN_C 0x04
#define DDC_PIN_D 0x06
struct ddi_vbt_port_info {
int max_tmds_clock;
@ -1099,6 +962,7 @@ struct intel_vbt_data {
unsigned int panel_type:4;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
enum drm_panel_orientation orientation;
enum drrs_support_type drrs_type;
@ -1144,6 +1008,7 @@ struct intel_vbt_data {
u8 *data;
const u8 *sequence[MIPI_SEQ_MAX];
u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
enum drm_panel_orientation orientation;
} dsi;
int crt_ddc_pin;
@ -1240,9 +1105,9 @@ struct skl_ddb_values {
};
struct skl_wm_level {
bool plane_en;
uint16_t plane_res_b;
uint8_t plane_res_l;
bool plane_en;
};
/* Stores plane specific WM parameters */
@ -1519,31 +1384,13 @@ struct i915_oa_ops {
*/
bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
/**
* @init_oa_buffer: Resets the head and tail pointers of the
* circular buffer for periodic OA reports.
*
* Called when first opening a stream for OA metrics, but also may be
* called in response to an OA buffer overflow or other error
* condition.
*
* Note it may be necessary to clear the full OA buffer here as part of
* maintaining the invariable that new reports must be written to
* zeroed memory for us to be able to reliable detect if an expected
* report has not yet landed in memory. (At least on Haswell the OA
* buffer tail pointer is not synchronized with reports being visible
* to the CPU)
*/
void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
/**
* @enable_metric_set: Selects and applies any MUX configuration to set
* up the Boolean and Custom (B/C) counters that are part of the
* counter reports being sampled. May apply system constraints such as
* disabling EU clock gating as required.
*/
int (*enable_metric_set)(struct drm_i915_private *dev_priv,
const struct i915_oa_config *oa_config);
int (*enable_metric_set)(struct i915_perf_stream *stream);
/**
* @disable_metric_set: Remove system constraints associated with using
@ -1554,12 +1401,12 @@ struct i915_oa_ops {
/**
* @oa_enable: Enable periodic sampling
*/
void (*oa_enable)(struct drm_i915_private *dev_priv);
void (*oa_enable)(struct i915_perf_stream *stream);
/**
* @oa_disable: Disable periodic sampling
*/
void (*oa_disable)(struct drm_i915_private *dev_priv);
void (*oa_disable)(struct i915_perf_stream *stream);
/**
* @read: Copy data from the circular OA buffer into a given userspace
@ -2322,6 +2169,8 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
(((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
(__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
bool i915_sg_trim(struct sg_table *orig_st);
static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
{
unsigned int page_sizes;
@ -2367,20 +2216,12 @@ intel_info(const struct drm_i915_private *dev_priv)
#define REVID_FOREVER 0xff
#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
#define GEN_FOREVER (0)
#define INTEL_GEN_MASK(s, e) ( \
BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \
(s) != GEN_FOREVER ? (s) - 1 : 0) \
)
GENMASK((e) - 1, (s) - 1))
/*
* Returns true if Gen is in inclusive range [Start, End].
*
* Use GEN_FOREVER for unbound start and or end.
*/
/* Returns true if Gen is in inclusive range [Start, End] */
#define IS_GEN(dev_priv, s, e) \
(!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
@ -2461,6 +2302,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
INTEL_DEVID(dev_priv) == 0x5915 || \
INTEL_DEVID(dev_priv) == 0x591E)
#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
INTEL_DEVID(dev_priv) == 0x87C0)
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
(dev_priv)->info.gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
@ -2592,9 +2435,14 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt)
#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2)
#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3)
#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt)
#define HAS_PPGTT(dev_priv) \
(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(dev_priv) \
(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
#define HAS_FULL_48BIT_PPGTT(dev_priv) \
(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL_4LVL)
#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
GEM_BUG_ON((sizes) == 0); \
((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
@ -2742,9 +2590,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
return IS_BROXTON(dev_priv) && intel_vtd_active();
}
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt);
/* i915_drv.c */
void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
@ -3229,7 +3074,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
const struct i915_sched_attr *attr);
#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
@ -3461,6 +3306,7 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port);
bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
enum port port);
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
/* intel_acpi.c */
#ifdef CONFIG_ACPI
@ -3482,8 +3328,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
extern void intel_connector_unregister(struct drm_connector *);
extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
bool state);
extern void intel_display_resume(struct drm_device *dev);
@ -3583,6 +3427,12 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state);
/* intel_combo_phy.c */
void icl_combo_phys_init(struct drm_i915_private *dev_priv);
void icl_combo_phys_uninit(struct drm_i915_private *dev_priv);
void cnl_combo_phys_init(struct drm_i915_private *dev_priv);
void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv);
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,


@ -0,0 +1,143 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2018 Intel Corporation
*/
#ifndef _I915_FIXED_H_
#define _I915_FIXED_H_
typedef struct {
u32 val;
} uint_fixed_16_16_t;
#define FP_16_16_MAX ((uint_fixed_16_16_t){ .val = UINT_MAX })
static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
{
return val.val == 0;
}
static inline uint_fixed_16_16_t u32_to_fixed16(u32 val)
{
uint_fixed_16_16_t fp = { .val = val << 16 };
WARN_ON(val > U16_MAX);
return fp;
}
static inline u32 fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
{
return DIV_ROUND_UP(fp.val, 1 << 16);
}
static inline u32 fixed16_to_u32(uint_fixed_16_16_t fp)
{
return fp.val >> 16;
}
static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
uint_fixed_16_16_t min2)
{
uint_fixed_16_16_t min = { .val = min(min1.val, min2.val) };
return min;
}
static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
uint_fixed_16_16_t max2)
{
uint_fixed_16_16_t max = { .val = max(max1.val, max2.val) };
return max;
}
static inline uint_fixed_16_16_t clamp_u64_to_fixed16(u64 val)
{
uint_fixed_16_16_t fp = { .val = (u32)val };
WARN_ON(val > U32_MAX);
return fp;
}
static inline u32 div_round_up_fixed16(uint_fixed_16_16_t val,
uint_fixed_16_16_t d)
{
return DIV_ROUND_UP(val.val, d.val);
}
static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
{
u64 tmp;
tmp = (u64)val * mul.val;
tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16);
WARN_ON(tmp > U32_MAX);
return (u32)tmp;
}
static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
uint_fixed_16_16_t mul)
{
u64 tmp;
tmp = (u64)val.val * mul.val;
tmp = tmp >> 16;
return clamp_u64_to_fixed16(tmp);
}
static inline uint_fixed_16_16_t div_fixed16(u32 val, u32 d)
{
u64 tmp;
tmp = (u64)val << 16;
tmp = DIV_ROUND_UP_ULL(tmp, d);
return clamp_u64_to_fixed16(tmp);
}
static inline u32 div_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t d)
{
u64 tmp;
tmp = (u64)val << 16;
tmp = DIV_ROUND_UP_ULL(tmp, d.val);
WARN_ON(tmp > U32_MAX);
return (u32)tmp;
}
static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
{
u64 tmp;
tmp = (u64)val * mul.val;
return clamp_u64_to_fixed16(tmp);
}
static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
uint_fixed_16_16_t add2)
{
u64 tmp;
tmp = (u64)add1.val + add2.val;
return clamp_u64_to_fixed16(tmp);
}
static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
u32 add2)
{
uint_fixed_16_16_t tmp_add2 = u32_to_fixed16(add2);
u64 tmp;
tmp = (u64)add1.val + tmp_add2.val;
return clamp_u64_to_fixed16(tmp);
}
#endif /* _I915_FIXED_H_ */
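These helpers replace the open-coded fixed-point block removed from i915_drv.h above. A small worked example of how the watermark code can compose them (the numbers are made up for illustration):

#include "i915_fixed.h"

/* Illustrative only: scale a pixel rate by a 2.0 x 1.5 downscale ratio. */
static u32 example_downscaled_rate(void)
{
	uint_fixed_16_16_t wscale = div_fixed16(3840, 1920);    /* 2.0 in 16.16 */
	uint_fixed_16_16_t hscale = div_fixed16(1620, 1080);    /* 1.5 in 16.16 */
	uint_fixed_16_16_t ratio = mul_fixed16(wscale, hscale); /* 3.0 */

	/* 300000 kHz * 3.0, rounded up, gives 900000 kHz. */
	return mul_round_up_u32_fixed16(300000, ratio);
}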


@ -1740,6 +1740,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
*/
err = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_PRIORITY |
(write_domain ? I915_WAIT_ALL : 0),
MAX_SCHEDULE_TIMEOUT,
to_rps_client(file));
@ -2381,11 +2382,23 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
/*
* Move pages to appropriate lru and release the pagevec, decrementing the
* ref count of those pages.
*/
static void check_release_pagevec(struct pagevec *pvec)
{
check_move_unevictable_pages(pvec);
__pagevec_release(pvec);
cond_resched();
}
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct pagevec pvec;
struct page *page;
__i915_gem_object_release_shmem(obj, pages, true);
@ -2395,6 +2408,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj, pages);
mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
pagevec_init(&pvec);
for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty)
set_page_dirty(page);
@ -2402,8 +2418,11 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
put_page(page);
if (!pagevec_add(&pvec, page))
check_release_pagevec(&pvec);
}
if (pagevec_count(&pvec))
check_release_pagevec(&pvec);
obj->mm.dirty = false;
sg_free_table(pages);
@ -2483,7 +2502,7 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
mutex_unlock(&obj->mm.lock);
}
static bool i915_sg_trim(struct sg_table *orig_st)
bool i915_sg_trim(struct sg_table *orig_st)
{
struct sg_table new_st;
struct scatterlist *sg, *new_sg;
@ -2524,6 +2543,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
unsigned long last_pfn = 0; /* suppress gcc warning */
unsigned int max_segment = i915_sg_segment_size();
unsigned int sg_page_sizes;
struct pagevec pvec;
gfp_t noreclaim;
int ret;
@ -2559,6 +2579,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* Fail silently without starting the shrinker
*/
mapping = obj->base.filp->f_mapping;
mapping_set_unevictable(mapping);
noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
@ -2573,6 +2594,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
gfp_t gfp = noreclaim;
do {
cond_resched();
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (likely(!IS_ERR(page)))
break;
@ -2583,7 +2605,6 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
}
i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
cond_resched();
/*
* We've tried hard to allocate the memory by reaping
@ -2673,8 +2694,14 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
err_sg:
sg_mark_end(sg);
err_pages:
for_each_sgt_page(page, sgt_iter, st)
put_page(page);
mapping_clear_unevictable(mapping);
pagevec_init(&pvec);
for_each_sgt_page(page, sgt_iter, st) {
if (!pagevec_add(&pvec, page))
check_release_pagevec(&pvec);
}
if (pagevec_count(&pvec))
check_release_pagevec(&pvec);
sg_free_table(st);
kfree(st);
@ -3530,6 +3557,8 @@ static void __sleep_rcu(struct rcu_head *rcu)
struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
struct drm_i915_private *i915 = s->i915;
destroy_rcu_head(&s->rcu);
if (same_epoch(i915, s->epoch)) {
INIT_WORK(&s->work, __sleep_work);
queue_work(i915->wq, &s->work);
@ -3646,6 +3675,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (same_epoch(dev_priv, epoch)) {
struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s) {
init_rcu_head(&s->rcu);
s->i915 = dev_priv;
s->epoch = epoch;
call_rcu(&s->rcu, __sleep_rcu);
@ -3743,7 +3773,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
start = ktime_get();
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_PRIORITY |
I915_WAIT_ALL,
to_wait_timeout(args->timeout_ns),
to_rps_client(file));
@ -4710,6 +4742,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->lut_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
init_rcu_head(&obj->rcu);
obj->ops = ops;
reservation_object_init(&obj->__builtin_resv);
@ -4976,6 +5010,13 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
container_of(head, typeof(*obj), rcu);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
/*
* We reuse obj->rcu for the freed list, so we had better not treat
* it like a rcu_head from this point forwards. And we expect all
* objects to be freed via this path.
*/
destroy_rcu_head(&obj->rcu);
/*
* Since we require blocking on struct_mutex to unbind the freed
* object from the GPU before releasing resources back to the
@ -5293,18 +5334,6 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
if (HAS_PCH_NOP(dev_priv)) {
if (IS_IVYBRIDGE(dev_priv)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
I915_WRITE(GEN7_MSG_CTL, temp);
} else if (INTEL_GEN(dev_priv) >= 7) {
u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
}
}
intel_gt_workarounds_apply(dev_priv);
i915_gem_init_swizzling(dev_priv);
@ -5951,7 +5980,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
* the bits.
*/
BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
sizeof(atomic_t) * BITS_PER_BYTE);
BITS_PER_TYPE(atomic_t));
if (old) {
WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));


@ -47,17 +47,19 @@ struct drm_i915_private;
#define GEM_DEBUG_DECL(var) var
#define GEM_DEBUG_EXEC(expr) expr
#define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr)
#define GEM_DEBUG_WARN_ON(expr) GEM_WARN_ON(expr)
#else
#define GEM_SHOW_DEBUG() (0)
#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0)
#define GEM_WARN_ON(expr) ({ unlikely(!!(expr)); })
#define GEM_DEBUG_DECL(var)
#define GEM_DEBUG_EXEC(expr) do { } while (0)
#define GEM_DEBUG_BUG_ON(expr)
#define GEM_DEBUG_WARN_ON(expr) ({ BUILD_BUG_ON_INVALID(expr); 0; })
#endif
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)


@ -337,7 +337,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->contexts.list);
ctx->i915 = dev_priv;
ctx->sched.priority = I915_PRIORITY_NORMAL;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
struct intel_context *ce = &ctx->__engine[n];
@ -414,7 +414,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
if (IS_ERR(ctx))
return ctx;
if (USES_FULL_PPGTT(dev_priv)) {
if (HAS_FULL_PPGTT(dev_priv)) {
struct i915_hw_ppgtt *ppgtt;
ppgtt = i915_ppgtt_create(dev_priv, file_priv);
@ -457,7 +457,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
if (ret)
return ERR_PTR(ret);
ctx = __create_hw_context(to_i915(dev), NULL);
ctx = i915_gem_create_context(to_i915(dev), NULL);
if (IS_ERR(ctx))
goto out;
@ -504,7 +504,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
}
i915_gem_context_clear_bannable(ctx);
ctx->sched.priority = prio;
ctx->sched.priority = I915_USER_PRIORITY(prio);
ctx->ring_size = PAGE_SIZE;
GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@ -879,7 +879,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->value = i915_gem_context_is_bannable(ctx);
break;
case I915_CONTEXT_PARAM_PRIORITY:
args->value = ctx->sched.priority;
args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
break;
default:
ret = -EINVAL;
@ -948,7 +948,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
!capable(CAP_SYS_NICE))
ret = -EPERM;
else
ctx->sched.priority = priority;
ctx->sched.priority =
I915_USER_PRIORITY(priority);
}
break;


@ -163,6 +163,7 @@ struct i915_gem_context {
/** engine: per-engine logical HW state */
struct intel_context {
struct i915_gem_context *gem_context;
struct intel_engine_cs *active;
struct i915_vma *state;
struct intel_ring *ring;
u32 *lrc_reg_state;


@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
else if (gen >= 4)
len = 4;
else
len = 3;
len = 6;
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
@ -1309,6 +1309,11 @@ relocate_entry(struct i915_vma *vma,
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = addr;
*batch++ = target_offset;
/* And again for good measure (blb/pnv) */
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = addr;
*batch++ = target_offset;
}
goto out;


@ -133,55 +133,6 @@ static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
i915->ggtt.invalidate(i915);
}
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt)
{
bool has_full_ppgtt;
bool has_full_48bit_ppgtt;
if (!dev_priv->info.has_aliasing_ppgtt)
return 0;
has_full_ppgtt = dev_priv->info.has_full_ppgtt;
has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
if (intel_vgpu_active(dev_priv)) {
/* GVT-g has no support for 32bit ppgtt */
has_full_ppgtt = false;
has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
}
/*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
* execlists, the sole mechanism available to submit work.
*/
if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
return 0;
if (enable_ppgtt == 1)
return 1;
if (enable_ppgtt == 2 && has_full_ppgtt)
return 2;
if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
return 3;
/* Disable ppgtt on SNB if VT-d is on. */
if (IS_GEN6(dev_priv) && intel_vtd_active()) {
DRM_INFO("Disabling PPGTT because VT-d is on\n");
return 0;
}
if (has_full_48bit_ppgtt)
return 3;
if (has_full_ppgtt)
return 2;
return 1;
}
static int ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
@ -235,9 +186,9 @@ static void clear_pages(struct i915_vma *vma)
memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
static u64 gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
{
gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
@ -274,9 +225,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode
static gen6_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 unused)
static u64 snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@ -296,9 +247,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr,
return pte;
}
static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 unused)
static u64 ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@ -320,9 +271,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
return pte;
}
static gen6_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
static u64 byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@ -336,9 +287,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr,
return pte;
}
static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 unused)
static u64 hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
@ -349,9 +300,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
return pte;
}
static gen6_pte_t iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 unused)
static u64 iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
@ -629,10 +580,9 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
* region, including any PTEs which happen to point to scratch.
*
* This is only relevant for the 48b PPGTT where we support
* huge-gtt-pages, see also i915_vma_insert().
*
* TODO: we should really consider write-protecting the scratch-page and
* sharing between ppgtt
* huge-gtt-pages, see also i915_vma_insert(). However, as we share the
* scratch (read-only) between all vm, we create one 64k scratch page
* for all.
*/
size = I915_GTT_PAGE_SIZE_4K;
if (i915_vm_is_48bit(vm) &&
@ -715,14 +665,13 @@ static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
static void gen8_initialize_pt(struct i915_address_space *vm,
struct i915_page_table *pt)
{
fill_px(vm, pt,
gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
fill_px(vm, pt, vm->scratch_pte);
}
static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
static void gen6_initialize_pt(struct i915_address_space *vm,
struct i915_page_table *pt)
{
fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
fill32_px(vm, pt, vm->scratch_pte);
}
static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
@ -856,15 +805,13 @@ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
/* Removes entries from a single page table, releasing it if it's empty.
* Caller can use the return value to update higher-level entries.
*/
static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
struct i915_page_table *pt,
u64 start, u64 length)
{
unsigned int num_entries = gen8_pte_count(start, length);
unsigned int pte = gen8_pte_index(start);
unsigned int pte_end = pte + num_entries;
const gen8_pte_t scratch_pte =
gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
gen8_pte_t *vaddr;
GEM_BUG_ON(num_entries > pt->used_ptes);
@ -875,7 +822,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
vaddr = kmap_atomic_px(pt);
while (pte < pte_end)
vaddr[pte++] = scratch_pte;
vaddr[pte++] = vm->scratch_pte;
kunmap_atomic(vaddr);
return false;
@ -1208,7 +1155,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
u16 i;
encode = pte_encode | vma->vm->scratch_page.daddr;
encode = vma->vm->scratch_pte;
vaddr = kmap_atomic_px(pd->page_table[idx.pde]);
for (i = 1; i < index; i += 16)
@ -1261,10 +1208,35 @@ static int gen8_init_scratch(struct i915_address_space *vm)
{
int ret;
/*
* If everybody agrees not to write into the scratch page,
* we can reuse it for all vm, keeping contexts and processes separate.
*/
if (vm->has_read_only &&
vm->i915->kernel_context &&
vm->i915->kernel_context->ppgtt) {
struct i915_address_space *clone =
&vm->i915->kernel_context->ppgtt->vm;
GEM_BUG_ON(!clone->has_read_only);
vm->scratch_page.order = clone->scratch_page.order;
vm->scratch_pte = clone->scratch_pte;
vm->scratch_pt = clone->scratch_pt;
vm->scratch_pd = clone->scratch_pd;
vm->scratch_pdp = clone->scratch_pdp;
return 0;
}
ret = setup_scratch_page(vm, __GFP_HIGHMEM);
if (ret)
return ret;
vm->scratch_pte =
gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC,
PTE_READ_ONLY);
vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
ret = PTR_ERR(vm->scratch_pt);
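
The recurring change here is that the scratch PTE is now encoded once when the scratch page is set up and cached in the address space (vm->scratch_pte), rather than re-derived from scratch_page.daddr in every clear path. A compressed standalone sketch of that pattern, using simplified stand-in types rather than the driver's structures:

#include <stdint.h>

struct toy_vm {
	uint64_t scratch_daddr;					/* DMA address of the scratch page */
	uint64_t scratch_pte;					/* encoded once at init */
	uint64_t (*pte_encode)(uint64_t addr, uint32_t flags);
};

static void toy_vm_init_scratch(struct toy_vm *vm)
{
	/* Encode once, read-only, and reuse everywhere. */
	vm->scratch_pte = vm->pte_encode(vm->scratch_daddr, 1 /* PTE_READ_ONLY */);
}

static void toy_vm_clear_range(struct toy_vm *vm, uint64_t *ptes, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++)
		ptes[i] = vm->scratch_pte;			/* no per-call re-encode */
}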
@ -1336,6 +1308,9 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
static void gen8_free_scratch(struct i915_address_space *vm)
{
if (!vm->scratch_page.daddr)
return;
if (use_4lvl(vm))
free_pdp(vm, vm->scratch_pdp);
free_pd(vm, vm->scratch_pd);
@ -1573,8 +1548,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
struct i915_address_space *vm = &ppgtt->vm;
const gen8_pte_t scratch_pte =
gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
const gen8_pte_t scratch_pte = vm->scratch_pte;
u64 start = 0, length = ppgtt->vm.total;
if (use_4lvl(vm)) {
@ -1647,16 +1621,12 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
ppgtt->vm.i915 = i915;
ppgtt->vm.dma = &i915->drm.pdev->dev;
ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ?
ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ?
1ULL << 48 :
1ULL << 32;
/*
* From bdw, there is support for read-only pages in the PPGTT.
*
* XXX GVT is not honouring the lack of RW in the PTE bits.
*/
ppgtt->vm.has_read_only = !intel_vgpu_active(i915);
/* From bdw, there is support for read-only pages in the PPGTT. */
ppgtt->vm.has_read_only = true;
i915_address_space_init(&ppgtt->vm, i915);
@ -1721,7 +1691,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
const gen6_pte_t scratch_pte = base->vm.scratch_pte;
struct i915_page_table *pt;
u32 pte, pde;
@ -1782,19 +1752,6 @@ static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
ppgtt->pd_addr + pde);
}
static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id) {
u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
GEN8_GFX_PPGTT_48B : 0;
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
}
}
static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
@ -1834,7 +1791,8 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
/* PPGTT support for Sandybridge/Gen6 and later */
@ -1846,7 +1804,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned int pde = first_entry / GEN6_PTES;
unsigned int pte = first_entry % GEN6_PTES;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
const gen6_pte_t scratch_pte = vm->scratch_pte;
while (num_entries) {
struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
@ -1937,7 +1895,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
if (IS_ERR(pt))
goto unwind_out;
gen6_initialize_pt(ppgtt, pt);
gen6_initialize_pt(vm, pt);
ppgtt->base.pd.page_table[pde] = pt;
if (i915_vma_is_bound(ppgtt->vma,
@ -1975,9 +1933,9 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
if (ret)
return ret;
ppgtt->scratch_pte =
vm->pte_encode(vm->scratch_page.daddr,
I915_CACHE_NONE, PTE_READ_ONLY);
vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
I915_CACHE_NONE,
PTE_READ_ONLY);
vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
@ -1985,7 +1943,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
return PTR_ERR(vm->scratch_pt);
}
gen6_initialize_pt(ppgtt, vm->scratch_pt);
gen6_initialize_pt(vm, vm->scratch_pt);
gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
@ -2237,23 +2195,10 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
gtt_write_workarounds(dev_priv);
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
* need to do anything here. */
if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
return 0;
if (!USES_PPGTT(dev_priv))
return 0;
if (IS_GEN6(dev_priv))
gen6_ppgtt_enable(dev_priv);
else if (IS_GEN7(dev_priv))
gen7_ppgtt_enable(dev_priv);
else if (INTEL_GEN(dev_priv) >= 8)
gen8_ppgtt_enable(dev_priv);
else
MISSING_CASE(INTEL_GEN(dev_priv));
return 0;
}
@ -2543,8 +2488,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start / I915_GTT_PAGE_SIZE;
unsigned num_entries = length / I915_GTT_PAGE_SIZE;
const gen8_pte_t scratch_pte =
gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
const gen8_pte_t scratch_pte = vm->scratch_pte;
gen8_pte_t __iomem *gtt_base =
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@ -2669,8 +2613,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, 0);
scratch_pte = vm->scratch_pte;
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
@ -2952,7 +2895,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
/* And finally clear the reserved guard page */
ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
ret = i915_gem_init_aliasing_ppgtt(dev_priv);
if (ret)
goto err;
@ -3076,6 +3019,10 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return ret;
}
ggtt->vm.scratch_pte =
ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
I915_CACHE_NONE, 0);
return 0;
}
@ -3275,7 +3222,7 @@ static void bdw_setup_private_ppat(struct intel_ppat *ppat)
ppat->match = bdw_private_pat_match;
ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
if (!USES_PPGTT(ppat->i915)) {
if (!HAS_PPGTT(ppat->i915)) {
/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
* so RTL will always use the value corresponding to
* pat_sel = 000".
@ -3402,7 +3349,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->vm.insert_page = gen8_ggtt_insert_page;
ggtt->vm.clear_range = nop_clear_range;
if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
if (intel_scanout_needs_vtd_wa(dev_priv))
ggtt->vm.clear_range = gen8_ggtt_clear_range;
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
@ -3413,6 +3360,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
if (ggtt->vm.clear_range != nop_clear_range)
ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
/* Prevent recursively calling stop_machine() and deadlocks. */
dev_info(dev_priv->drm.dev,
"Disabling error capture for VT-d workaround\n");
i915_disable_error_state(dev_priv, -ENODEV);
}
ggtt->invalidate = gen6_ggtt_invalidate;
@ -3422,6 +3374,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
ggtt->vm.vma_ops.clear_pages = clear_pages;
ggtt->vm.pte_encode = gen8_pte_encode;
setup_private_pat(dev_priv);
return ggtt_probe_common(ggtt, size);
@ -3609,7 +3563,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
/* Only VLV supports read-only GGTT mappings */
ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
mutex_unlock(&dev_priv->drm.struct_mutex);
@ -3711,7 +3665,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
}
static struct scatterlist *
rotate_pages(const dma_addr_t *in, unsigned int offset,
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
unsigned int width, unsigned int height,
unsigned int stride,
struct sg_table *st, struct scatterlist *sg)
@ -3720,7 +3674,7 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
unsigned int src_idx;
for (column = 0; column < width; column++) {
src_idx = stride * (height - 1) + column;
src_idx = stride * (height - 1) + column + offset;
for (row = 0; row < height; row++) {
st->nents++;
/* We don't need the pages, but need to initialize
@ -3728,7 +3682,8 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
* The only thing we need are DMA addresses.
*/
sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
sg_dma_address(sg) = in[offset + src_idx];
sg_dma_address(sg) =
i915_gem_object_get_dma_address(obj, src_idx);
sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
sg = sg_next(sg);
src_idx -= stride;
@ -3742,22 +3697,11 @@ static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE;
unsigned int size = intel_rotation_info_size(rot_info);
struct sgt_iter sgt_iter;
dma_addr_t dma_addr;
unsigned long i;
dma_addr_t *page_addr_list;
struct sg_table *st;
struct scatterlist *sg;
int ret = -ENOMEM;
/* Allocate a temporary list of source pages for random access. */
page_addr_list = kvmalloc_array(n_pages,
sizeof(dma_addr_t),
GFP_KERNEL);
if (!page_addr_list)
return ERR_PTR(ret);
int i;
/* Allocate target SG list. */
st = kmalloc(sizeof(*st), GFP_KERNEL);
@ -3768,29 +3712,20 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
if (ret)
goto err_sg_alloc;
/* Populate source page list from the object. */
i = 0;
for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
page_addr_list[i++] = dma_addr;
GEM_BUG_ON(i != n_pages);
st->nents = 0;
sg = st->sgl;
for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
sg = rotate_pages(obj, rot_info->plane[i].offset,
rot_info->plane[i].width, rot_info->plane[i].height,
rot_info->plane[i].stride, st, sg);
}
kvfree(page_addr_list);
return st;
err_sg_alloc:
kfree(st);
err_st_alloc:
kvfree(page_addr_list);
DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
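
rotate_pages() now pulls DMA addresses straight from the object via i915_gem_object_get_dma_address() instead of building a temporary address array, but the walk order is unchanged: columns left to right, and within each column the source index starts at the bottom row and steps back by the stride. A standalone sketch that only prints the visiting order, assuming the same offset/width/height/stride semantics as above:

#include <stdio.h>

static void dump_rotation_order(unsigned int offset, unsigned int width,
				unsigned int height, unsigned int stride)
{
	for (unsigned int column = 0; column < width; column++) {
		unsigned int src_idx = stride * (height - 1) + column + offset;

		for (unsigned int row = 0; row < height; row++) {
			/* dst walks down the column; src climbs up by one stride per row. */
			printf("dst (col %u, row %u) <- src page %u\n", column, row, src_idx);
			src_idx -= stride;
		}
	}
}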
@ -3835,6 +3770,8 @@ intel_partial_pages(const struct i915_ggtt_view *view,
count -= len >> PAGE_SHIFT;
if (count == 0) {
sg_mark_end(sg);
i915_sg_trim(st); /* Drop any unused tail entries. */
return st;
}

View file

@ -289,6 +289,7 @@ struct i915_address_space {
struct mutex mutex; /* protects vma and our lists */
u64 scratch_pte;
struct i915_page_dma scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
@ -335,12 +336,11 @@ struct i915_address_space {
/* Some systems support read-only mappings for GGTT and/or PPGTT */
bool has_read_only:1;
/* FIXME: Need a more generic return type */
gen6_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
u32 flags); /* Create a valid PTE */
/* flags for pte_encode */
u64 (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY (1<<0)
int (*allocate_va_range)(struct i915_address_space *vm,
u64 start, u64 length);
void (*clear_range)(struct i915_address_space *vm,
@ -422,7 +422,6 @@ struct gen6_hw_ppgtt {
struct i915_vma *vma;
gen6_pte_t __iomem *pd_addr;
gen6_pte_t scratch_pte;
unsigned int pin_count;
bool scan_for_unused_pt;

View file

@ -27,7 +27,7 @@
*
*/
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/stop_machine.h>
#include <linux/zlib.h>
#include <drm/drm_print.h>
@ -512,7 +512,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " SYNC_2: 0x%08x\n",
ee->semaphore_mboxes[2]);
}
if (USES_PPGTT(m->i915)) {
if (HAS_PPGTT(m->i915)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
if (INTEL_GEN(m->i915) >= 8) {
@ -648,9 +648,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
return 0;
}
if (IS_ERR(error))
return PTR_ERR(error);
if (*error->error_msg)
err_printf(m, "%s\n", error->error_msg);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
err_printf(m, "Kernel: %s\n", init_utsname()->release);
ts = ktime_to_timespec64(error->time);
err_printf(m, "Time: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
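
Printing the kernel version via init_utsname()->release resolves the string at run time from the running kernel's utsname, whereas the old UTS_RELEASE literal was baked in at build time and pulled in <generated/utsrelease.h> as a rebuild dependency. A userspace analogue of the run-time lookup, using uname(2):

#include <stdio.h>
#include <sys/utsname.h>

int main(void)
{
	struct utsname u;

	if (uname(&u) == 0)
		printf("Kernel: %s\n", u.release);	/* resolved at run time, no build-time constant */
	return 0;
}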
@ -999,7 +1002,6 @@ i915_error_object_create(struct drm_i915_private *i915,
}
compress_fini(&compress, dst);
ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
return dst;
}
@ -1268,7 +1270,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
engine);
if (USES_PPGTT(dev_priv)) {
if (HAS_PPGTT(dev_priv)) {
int i;
ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
@ -1785,6 +1787,14 @@ static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
return epoch;
}
static void capture_finish(struct i915_gpu_state *error)
{
struct i915_ggtt *ggtt = &error->i915->ggtt;
const u64 slot = ggtt->error_capture.start;
ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
}
static int capture(void *data)
{
struct i915_gpu_state *error = data;
@ -1809,6 +1819,7 @@ static int capture(void *data)
error->epoch = capture_find_epoch(error);
capture_finish(error);
return 0;
}
@ -1859,6 +1870,7 @@ void i915_capture_error_state(struct drm_i915_private *i915,
error = i915_capture_gpu_state(i915);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
i915_disable_error_state(i915, -ENOMEM);
return;
}
@ -1914,5 +1926,14 @@ void i915_reset_error_state(struct drm_i915_private *i915)
i915->gpu_error.first_error = NULL;
spin_unlock_irq(&i915->gpu_error.lock);
i915_gpu_state_put(error);
if (!IS_ERR(error))
i915_gpu_state_put(error);
}
void i915_disable_error_state(struct drm_i915_private *i915, int err)
{
spin_lock_irq(&i915->gpu_error.lock);
if (!i915->gpu_error.first_error)
i915->gpu_error.first_error = ERR_PTR(err);
spin_unlock_irq(&i915->gpu_error.lock);
}
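
i915_disable_error_state() reuses the first_error slot as a sentinel: once an ERR_PTR()-encoded value is stored there, later readers see "error capture disabled" rather than NULL or a real capture. A standalone sketch of that sentinel idea; ERR_PTR()/IS_ERR() below are simplified stand-ins for the kernel macros:

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

static void *first_error;			/* NULL = nothing captured yet */

static void disable_error_state(int err)
{
	if (!first_error)
		first_error = ERR_PTR(err);	/* poison the slot, e.g. -ENODEV */
}

int main(void)
{
	disable_error_state(-19 /* -ENODEV */);

	if (IS_ERR(first_error))
		printf("error capture disabled: %ld\n", (long)(intptr_t)first_error);
	return 0;
}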

View file

@ -343,6 +343,7 @@ static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
void i915_disable_error_state(struct drm_i915_private *i915, int err);
#else
@ -355,13 +356,18 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
static inline struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
return NULL;
return ERR_PTR(-ENODEV);
}
static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}
static inline void i915_disable_error_state(struct drm_i915_private *i915,
int err)
{
}
#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
#endif /* _I915_GPU_ERROR_H_ */

View file

@ -2887,21 +2887,39 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
return ret;
}
static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
/*
* Now with master disabled, get a sample of level indications
* for this interrupt. Indications will be cleared on related acks.
* New indications can and will light up during processing,
* and will generate a new interrupt after the master is re-enabled.
*/
return raw_reg_read(regs, GEN8_MASTER_IRQ);
}
static inline void gen8_master_intr_enable(void __iomem * const regs)
{
raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = to_i915(arg);
void __iomem * const regs = dev_priv->regs;
u32 master_ctl;
u32 gt_iir[4];
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
if (!master_ctl)
master_ctl = gen8_master_intr_disable(regs);
if (!master_ctl) {
gen8_master_intr_enable(regs);
return IRQ_NONE;
I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
}
/* Find, clear, then process each source of interrupt */
gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
@ -2913,7 +2931,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
enable_rpm_wakeref_asserts(dev_priv);
}
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
gen8_master_intr_enable(regs);
gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
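
The new gen8_master_intr_disable()/gen8_master_intr_enable() helpers (and their gen11 twins below) bracket the handler with a disable, sample, handle, re-enable sequence, so the level indications are sampled while the master interrupt is off and nothing new fires mid-processing. A standalone sketch of that bracketing with the register I/O reduced to a plain variable; the names and control bit are illustrative only:

#include <stdint.h>
#include <stdbool.h>

#define MASTER_IRQ_CONTROL	0x80000000u	/* stand-in for the master enable bit */

static uint32_t master_irq_reg;			/* stand-in for the master IRQ register */

static uint32_t master_intr_disable(void)
{
	master_irq_reg &= ~MASTER_IRQ_CONTROL;	/* stop new interrupts */
	return master_irq_reg;			/* sample the latched level indications */
}

static void master_intr_enable(void)
{
	master_irq_reg |= MASTER_IRQ_CONTROL;	/* re-arm the master interrupt */
}

static bool irq_handler(void)
{
	uint32_t master_ctl = master_intr_disable();

	if (!master_ctl) {
		master_intr_enable();		/* spurious: re-enable and bail */
		return false;
	}

	/* ... find, clear and process each pending source here ... */

	master_intr_enable();
	return true;
}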
@ -3111,6 +3129,24 @@ gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
intel_opregion_asle_intr(dev_priv);
}
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
/*
* Now with master disabled, get a sample of level indications
* for this interrupt. Indications will be cleared on related acks.
* New indications can and will light up during processing,
* and will generate a new interrupt after the master is re-enabled.
*/
return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}
static inline void gen11_master_intr_enable(void __iomem * const regs)
{
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = to_i915(arg);
@ -3121,13 +3157,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
if (!intel_irqs_enabled(i915))
return IRQ_NONE;
master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
master_ctl &= ~GEN11_MASTER_IRQ;
if (!master_ctl)
master_ctl = gen11_master_intr_disable(regs);
if (!master_ctl) {
gen11_master_intr_enable(regs);
return IRQ_NONE;
/* Disable interrupts. */
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
}
/* Find, clear, then process each source of interrupt. */
gen11_gt_irq_handler(i915, master_ctl);
@ -3147,8 +3181,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
/* Acknowledge and enable interrupts. */
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
gen11_master_intr_enable(regs);
gen11_gu_misc_irq_handler(i915, gu_misc_iir);
@ -3598,8 +3631,7 @@ static void gen8_irq_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
gen8_master_intr_disable(dev_priv->regs);
gen8_gt_irq_reset(dev_priv);
@ -3641,13 +3673,15 @@ static void gen11_irq_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
I915_WRITE(GEN11_GFX_MSTR_IRQ, 0);
POSTING_READ(GEN11_GFX_MSTR_IRQ);
gen11_master_intr_disable(dev_priv->regs);
gen11_gt_irq_reset(dev_priv);
I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
I915_WRITE(EDP_PSR_IMR, 0xffffffff);
I915_WRITE(EDP_PSR_IIR, 0xffffffff);
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
@ -4244,8 +4278,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
gen8_master_intr_enable(dev_priv->regs);
return 0;
}
@ -4307,8 +4340,7 @@ static int gen11_irq_postinstall(struct drm_device *dev)
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
POSTING_READ(GEN11_GFX_MSTR_IRQ);
gen11_master_intr_enable(dev_priv->regs);
return 0;
}
@ -4834,6 +4866,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev_priv->display_irqs_enabled = false;
dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
/* If we have MST support, we want to avoid doing short HPD IRQ storm
* detection, as short HPD storms will occur as a natural part of
* sideband messaging with MST.
* On older platforms however, IRQ storms can occur with both long and
* short pulses, as seen on some G4x systems.
*/
dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/sysfs.h>

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_BDW_H__

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/sysfs.h>

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_BXT_H__

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/sysfs.h>

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_CFLGT2_H__

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/sysfs.h>

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_CFLGT3_H__

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/sysfs.h>

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_CHV_H__

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/sysfs.h>

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_CNL_H__

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/sysfs.h>

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_GLK_H__

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/sysfs.h>

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_HSW_H__

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/sysfs.h>

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_ICL_H__

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/sysfs.h>

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_KBLGT2_H__

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/sysfs.h>

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_KBLGT3_H__

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/sysfs.h>

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_SKLGT2_H__

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/sysfs.h>

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_SKLGT3_H__

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/sysfs.h>

View file

@ -1,29 +1,10 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_SKLGT4_H__

View file

@ -82,10 +82,6 @@ i915_param_named_unsafe(enable_hangcheck, bool, 0644,
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
i915_param_named_unsafe(enable_ppgtt, int, 0400,
"Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
i915_param_named_unsafe(enable_psr, int, 0600,
"Enable PSR "
"(0=disabled, 1=enabled) "
@ -171,8 +167,10 @@ i915_param_named_unsafe(inject_load_failure, uint, 0400,
i915_param_named(enable_dpcd_backlight, bool, 0600,
"Enable support for DPCD backlight control (default:false)");
#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
#endif
static __always_inline void _print_param(struct drm_printer *p,
const char *name,
@ -188,7 +186,8 @@ static __always_inline void _print_param(struct drm_printer *p,
else if (!__builtin_strcmp(type, "char *"))
drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
else
BUILD_BUG();
WARN_ONCE(1, "no printer defined for param type %s (i915.%s)\n",
type, name);
}
/**

View file

@ -41,7 +41,6 @@ struct drm_printer;
param(int, vbt_sdvo_panel_type, -1) \
param(int, enable_dc, -1) \
param(int, enable_fbc, -1) \
param(int, enable_ppgtt, -1) \
param(int, enable_psr, -1) \
param(int, disable_power_well, -1) \
param(int, enable_ips, 1) \

View file

@ -33,19 +33,30 @@
#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
#define GEN_DEFAULT_PIPEOFFSETS \
.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = PIPE_C_OFFSET, \
[TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
}, \
.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
[TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
}
#define GEN_CHV_PIPEOFFSETS \
.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
CHV_PIPE_C_OFFSET }, \
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
CHV_TRANSCODER_C_OFFSET, }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
CHV_PALETTE_C_OFFSET }
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = CHV_PIPE_C_OFFSET, \
}, \
.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \
}
#define CURSOR_OFFSETS \
.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
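The pipe/transcoder offset tables above move from positional initializers to designated initializers keyed by the transcoder enum, so an entry can no longer land at the wrong index if the enum values are reordered or grow gaps. A minimal stand-alone sketch of the idiom; the enum and offset values below are invented for illustration only:

/* Designated initializers bind each value to its enumerator rather than to
 * its position inside the braces, so reordering the enum cannot silently
 * shift the table. */
enum example_transcoder { EX_TRANS_A, EX_TRANS_B, EX_TRANS_C, EX_TRANS_EDP, EX_TRANS_NUM };

static const unsigned int example_trans_offsets[EX_TRANS_NUM] = {
	[EX_TRANS_A]   = 0x60000,
	[EX_TRANS_B]   = 0x61000,
	[EX_TRANS_C]   = 0x62000,
	[EX_TRANS_EDP] = 0x6f000,
};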
@ -252,7 +263,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
.has_aliasing_ppgtt = 1, \
.ppgtt = INTEL_PPGTT_ALIASING, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
@ -297,8 +308,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
.has_aliasing_ppgtt = 1, \
.has_full_ppgtt = 1, \
.ppgtt = INTEL_PPGTT_FULL, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
IVB_CURSOR_OFFSETS
@ -351,8 +361,7 @@ static const struct intel_device_info intel_valleyview_info = {
.has_rc6 = 1,
.has_gmch_display = 1,
.has_hotplug = 1,
.has_aliasing_ppgtt = 1,
.has_full_ppgtt = 1,
.ppgtt = INTEL_PPGTT_FULL,
.has_snoop = true,
.has_coherent_ggtt = false,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
@ -399,7 +408,7 @@ static const struct intel_device_info intel_haswell_gt3_info = {
.page_sizes = I915_GTT_PAGE_SIZE_4K | \
I915_GTT_PAGE_SIZE_2M, \
.has_logical_ring_contexts = 1, \
.has_full_48bit_ppgtt = 1, \
.ppgtt = INTEL_PPGTT_FULL_4LVL, \
.has_64bit_reloc = 1, \
.has_reset_engine = 1
@ -443,8 +452,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_rc6 = 1,
.has_logical_ring_contexts = 1,
.has_gmch_display = 1,
.has_aliasing_ppgtt = 1,
.has_full_ppgtt = 1,
.ppgtt = INTEL_PPGTT_FULL,
.has_reset_engine = 1,
.has_snoop = true,
.has_coherent_ggtt = false,
@ -472,6 +480,8 @@ static const struct intel_device_info intel_cherryview_info = {
#define SKL_PLATFORM \
GEN9_FEATURES, \
/* Display WA #0477 WaDisableIPC: skl */ \
.has_ipc = 0, \
PLATFORM(INTEL_SKYLAKE)
static const struct intel_device_info intel_skylake_gt1_info = {
@ -518,9 +528,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
.has_guc = 1, \
.has_aliasing_ppgtt = 1, \
.has_full_ppgtt = 1, \
.has_full_48bit_ppgtt = 1, \
.ppgtt = INTEL_PPGTT_FULL_4LVL, \
.has_reset_engine = 1, \
.has_snoop = true, \
.has_coherent_ggtt = false, \
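The separate has_aliasing_ppgtt/has_full_ppgtt/has_full_48bit_ppgtt booleans collapse into one ordered .ppgtt level per platform. A rough sketch of the shape this takes; only INTEL_PPGTT_ALIASING, INTEL_PPGTT_FULL and INTEL_PPGTT_FULL_4LVL appear in this diff, the zero enumerator and the helper name below are assumptions for illustration:

enum intel_ppgtt_example {
	INTEL_PPGTT_NONE = 0,	/* assumed "no PPGTT" value */
	INTEL_PPGTT_ALIASING,
	INTEL_PPGTT_FULL,
	INTEL_PPGTT_FULL_4LVL,
};

/* With an ordered level, "at least full PPGTT" becomes a single comparison
 * instead of a check of two booleans (hypothetical helper): */
static inline bool example_has_full_ppgtt(enum intel_ppgtt_example level)
{
	return level >= INTEL_PPGTT_FULL;
}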
@ -598,6 +606,22 @@ static const struct intel_device_info intel_cannonlake_info = {
#define GEN11_FEATURES \
GEN10_FEATURES, \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = PIPE_C_OFFSET, \
[TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
[TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
[TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
}, \
.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
[TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
[TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
}, \
GEN(11), \
.ddb_size = 2048, \
.has_logical_ring_elsq = 1
@ -663,7 +687,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
@ -671,6 +695,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_ICL_11_IDS(&intel_icelake_11_info),

View file

@ -890,8 +890,8 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
dev_priv->perf.oa.period_exponent);
dev_priv->perf.oa.ops.oa_disable(dev_priv);
dev_priv->perf.oa.ops.oa_enable(dev_priv);
dev_priv->perf.oa.ops.oa_disable(stream);
dev_priv->perf.oa.ops.oa_enable(stream);
/*
* Note: .oa_enable() is expected to re-init the oabuffer and
@ -1114,8 +1114,8 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
dev_priv->perf.oa.period_exponent);
dev_priv->perf.oa.ops.oa_disable(dev_priv);
dev_priv->perf.oa.ops.oa_enable(dev_priv);
dev_priv->perf.oa.ops.oa_disable(stream);
dev_priv->perf.oa.ops.oa_enable(stream);
oastatus1 = I915_READ(GEN7_OASTATUS1);
}
@ -1528,8 +1528,6 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
goto err_unpin;
}
dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);
DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
dev_priv->perf.oa.oa_buffer.vaddr);
@ -1563,9 +1561,11 @@ static void config_oa_regs(struct drm_i915_private *dev_priv,
}
}
static int hsw_enable_metric_set(struct drm_i915_private *dev_priv,
const struct i915_oa_config *oa_config)
static int hsw_enable_metric_set(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
const struct i915_oa_config *oa_config = stream->oa_config;
/* PRM:
*
* OA unit is using crclk for its functionality. When trunk
@ -1767,9 +1767,10 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
return 0;
}
static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
const struct i915_oa_config *oa_config)
static int gen8_enable_metric_set(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
const struct i915_oa_config *oa_config = stream->oa_config;
int ret;
/*
@ -1837,10 +1838,10 @@ static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
}
static void gen7_oa_enable(struct drm_i915_private *dev_priv)
static void gen7_oa_enable(struct i915_perf_stream *stream)
{
struct i915_gem_context *ctx =
dev_priv->perf.oa.exclusive_stream->ctx;
struct drm_i915_private *dev_priv = stream->dev_priv;
struct i915_gem_context *ctx = stream->ctx;
u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
bool periodic = dev_priv->perf.oa.periodic;
u32 period_exponent = dev_priv->perf.oa.period_exponent;
@ -1867,8 +1868,9 @@ static void gen7_oa_enable(struct drm_i915_private *dev_priv)
GEN7_OACONTROL_ENABLE);
}
static void gen8_oa_enable(struct drm_i915_private *dev_priv)
static void gen8_oa_enable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
u32 report_format = dev_priv->perf.oa.oa_buffer.format;
/*
@ -1905,7 +1907,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
dev_priv->perf.oa.ops.oa_enable(dev_priv);
dev_priv->perf.oa.ops.oa_enable(stream);
if (dev_priv->perf.oa.periodic)
hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
@ -1913,8 +1915,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
HRTIMER_MODE_REL_PINNED);
}
static void gen7_oa_disable(struct drm_i915_private *dev_priv)
static void gen7_oa_disable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
I915_WRITE(GEN7_OACONTROL, 0);
if (intel_wait_for_register(dev_priv,
GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
@ -1922,8 +1926,10 @@ static void gen7_oa_disable(struct drm_i915_private *dev_priv)
DRM_ERROR("wait for OA to be disabled timed out\n");
}
static void gen8_oa_disable(struct drm_i915_private *dev_priv)
static void gen8_oa_disable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
I915_WRITE(GEN8_OACONTROL, 0);
if (intel_wait_for_register(dev_priv,
GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
@ -1943,7 +1949,7 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
dev_priv->perf.oa.ops.oa_disable(dev_priv);
dev_priv->perf.oa.ops.oa_disable(stream);
if (dev_priv->perf.oa.periodic)
hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
@ -1998,7 +2004,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EINVAL;
}
if (!dev_priv->perf.oa.ops.init_oa_buffer) {
if (!dev_priv->perf.oa.ops.enable_metric_set) {
DRM_DEBUG("OA unit not supported\n");
return -ENODEV;
}
@ -2092,8 +2098,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
if (ret)
goto err_lock;
ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
stream->oa_config);
ret = dev_priv->perf.oa.ops.enable_metric_set(stream);
if (ret) {
DRM_DEBUG("Unable to enable metric set\n");
goto err_enable;
@ -3387,7 +3392,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ops.is_valid_mux_reg =
hsw_is_valid_mux_addr;
dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
@ -3406,7 +3410,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
*/
dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
dev_priv->perf.oa.ops.read = gen8_oa_read;
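Throughout this file the OA hooks switch from taking struct drm_i915_private to taking the i915_perf_stream, and each implementation pulls the device back out via stream->dev_priv. A heavily abridged sketch of the resulting callback shape; the struct name and field set here are illustrative, not the full ops definition:

struct example_oa_ops {
	int  (*enable_metric_set)(struct i915_perf_stream *stream);
	void (*oa_enable)(struct i915_perf_stream *stream);
	void (*oa_disable)(struct i915_perf_stream *stream);
};

Passing the stream lets a hook reach per-stream state such as stream->ctx and stream->oa_config directly, instead of chasing it through dev_priv->perf.oa.exclusive_stream as gen7_oa_enable() did before this change.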

View file

@ -27,8 +27,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
slice_length = sizeof(sseu->slice_mask);
subslice_length = sseu->max_slices *
DIV_ROUND_UP(sseu->max_subslices,
sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE);
DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
eu_length = sseu->max_slices * sseu->max_subslices *
DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
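The subslice mask stride reported to userspace is now computed per byte rather than per sizeof(subslice_mask[0]) bytes. A quick worked sizing example, assuming an ICL-like topology of 1 slice with 8 subslices and 8 EUs per subslice (numbers are illustrative):

/* With max_slices = 1, max_subslices = 8, max_eus_per_subslice = 8:
 *   subslice_length = 1 * DIV_ROUND_UP(8, BITS_PER_BYTE) = 1 byte
 *   eu_length       = 1 * 8 * DIV_ROUND_UP(8, BITS_PER_BYTE) = 8 bytes
 * i.e. one subslice-mask byte per slice and one EU-mask byte per
 * (slice, subslice) pair in the topology blob copied to userspace.
 */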

File diff suppressed because it is too large.

View file

@ -111,91 +111,6 @@ i915_request_remove_from_client(struct i915_request *request)
spin_unlock(&file_priv->mm.lock);
}
static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}
static void
i915_dependency_free(struct drm_i915_private *i915,
struct i915_dependency *dep)
{
kmem_cache_free(i915->dependencies, dep);
}
static void
__i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal,
struct i915_dependency *dep,
unsigned long flags)
{
INIT_LIST_HEAD(&dep->dfs_link);
list_add(&dep->wait_link, &signal->waiters_list);
list_add(&dep->signal_link, &node->signalers_list);
dep->signaler = signal;
dep->flags = flags;
}
static int
i915_sched_node_add_dependency(struct drm_i915_private *i915,
struct i915_sched_node *node,
struct i915_sched_node *signal)
{
struct i915_dependency *dep;
dep = i915_dependency_alloc(i915);
if (!dep)
return -ENOMEM;
__i915_sched_node_add_dependency(node, signal, dep,
I915_DEPENDENCY_ALLOC);
return 0;
}
static void
i915_sched_node_fini(struct drm_i915_private *i915,
struct i915_sched_node *node)
{
struct i915_dependency *dep, *tmp;
GEM_BUG_ON(!list_empty(&node->link));
/*
* Everyone we depended upon (the fences we wait to be signaled)
* should retire before us and remove themselves from our list.
* However, retirement is run independently on each timeline and
* so we may be called out-of-order.
*/
list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
GEM_BUG_ON(!list_empty(&dep->dfs_link));
list_del(&dep->wait_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(i915, dep);
}
/* Remove ourselves from everyone who depends upon us */
list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
GEM_BUG_ON(dep->signaler != node);
GEM_BUG_ON(!list_empty(&dep->dfs_link));
list_del(&dep->signal_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(i915, dep);
}
}
static void
i915_sched_node_init(struct i915_sched_node *node)
{
INIT_LIST_HEAD(&node->signalers_list);
INIT_LIST_HEAD(&node->waiters_list);
INIT_LIST_HEAD(&node->link);
node->attr.priority = I915_PRIORITY_INVALID;
}
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
struct intel_engine_cs *engine;
@ -221,6 +136,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
intel_engine_get_seqno(engine),
seqno);
kthread_park(engine->breadcrumbs.signaler);
if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
/* Flush any waiters before we reuse the seqno */
intel_engine_disarm_breadcrumbs(engine);
@ -235,6 +152,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
/* Finally reset hw state */
intel_engine_init_global_seqno(engine, seqno);
engine->timeline.seqno = seqno;
kthread_unpark(engine->breadcrumbs.signaler);
}
list_for_each_entry(timeline, &i915->gt.timelines, link)
@ -740,17 +659,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
if (rq)
cond_synchronize_rcu(rq->rcustate);
/*
* We've forced the client to stall and catch up with whatever
* backlog there might have been. As we are assuming that we
* caused the mempressure, now is an opportune time to
* recover as much memory from the request pool as is possible.
* Having already penalized the client to stall, we spend
* a little extra time to re-optimise page allocation.
*/
kmem_cache_shrink(i915->requests);
rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
if (!rq) {
ret = -ENOMEM;
@ -1127,8 +1035,20 @@ void i915_request_add(struct i915_request *request)
*/
local_bh_disable();
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule)
engine->schedule(request, &request->gem_context->sched);
if (engine->schedule) {
struct i915_sched_attr attr = request->gem_context->sched;
/*
* Boost priorities to new clients (new request flows).
*
* Allow interactive/synchronous clients to jump ahead of
* the bulk clients. (FQ_CODEL)
*/
if (!prev || i915_request_completed(prev))
attr.priority |= I915_PRIORITY_NEWCLIENT;
engine->schedule(request, &attr);
}
rcu_read_unlock();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
@ -1310,6 +1230,8 @@ long i915_request_wait(struct i915_request *rq,
add_wait_queue(errq, &reset);
intel_wait_init(&wait);
if (flags & I915_WAIT_PRIORITY)
i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
restart:
do {

View file

@ -277,8 +277,9 @@ long i915_request_wait(struct i915_request *rq,
__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
#define I915_WAIT_PRIORITY BIT(2) /* small priority bump for the request */
#define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */
#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
u32 seqno);
@ -332,14 +333,6 @@ static inline bool i915_request_completed(const struct i915_request *rq)
return __i915_request_completed(rq, seqno);
}
static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
{
const struct i915_request *rq =
container_of(node, const struct i915_request, sched);
return i915_request_completed(rq);
}
void i915_retire_requests(struct drm_i915_private *i915);
/*

View file

@ -0,0 +1,399 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*/
#include <linux/mutex.h>
#include "i915_drv.h"
#include "i915_request.h"
#include "i915_scheduler.h"
static DEFINE_SPINLOCK(schedule_lock);
static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
return container_of(node, const struct i915_request, sched);
}
static inline bool node_signaled(const struct i915_sched_node *node)
{
return i915_request_completed(node_to_request(node));
}
void i915_sched_node_init(struct i915_sched_node *node)
{
INIT_LIST_HEAD(&node->signalers_list);
INIT_LIST_HEAD(&node->waiters_list);
INIT_LIST_HEAD(&node->link);
node->attr.priority = I915_PRIORITY_INVALID;
}
static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}
static void
i915_dependency_free(struct drm_i915_private *i915,
struct i915_dependency *dep)
{
kmem_cache_free(i915->dependencies, dep);
}
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal,
struct i915_dependency *dep,
unsigned long flags)
{
bool ret = false;
spin_lock(&schedule_lock);
if (!node_signaled(signal)) {
INIT_LIST_HEAD(&dep->dfs_link);
list_add(&dep->wait_link, &signal->waiters_list);
list_add(&dep->signal_link, &node->signalers_list);
dep->signaler = signal;
dep->flags = flags;
ret = true;
}
spin_unlock(&schedule_lock);
return ret;
}
int i915_sched_node_add_dependency(struct drm_i915_private *i915,
struct i915_sched_node *node,
struct i915_sched_node *signal)
{
struct i915_dependency *dep;
dep = i915_dependency_alloc(i915);
if (!dep)
return -ENOMEM;
if (!__i915_sched_node_add_dependency(node, signal, dep,
I915_DEPENDENCY_ALLOC))
i915_dependency_free(i915, dep);
return 0;
}
void i915_sched_node_fini(struct drm_i915_private *i915,
struct i915_sched_node *node)
{
struct i915_dependency *dep, *tmp;
GEM_BUG_ON(!list_empty(&node->link));
spin_lock(&schedule_lock);
/*
* Everyone we depended upon (the fences we wait to be signaled)
* should retire before us and remove themselves from our list.
* However, retirement is run independently on each timeline and
* so we may be called out-of-order.
*/
list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
GEM_BUG_ON(!node_signaled(dep->signaler));
GEM_BUG_ON(!list_empty(&dep->dfs_link));
list_del(&dep->wait_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(i915, dep);
}
/* Remove ourselves from everyone who depends upon us */
list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
GEM_BUG_ON(dep->signaler != node);
GEM_BUG_ON(!list_empty(&dep->dfs_link));
list_del(&dep->signal_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(i915, dep);
}
spin_unlock(&schedule_lock);
}
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
}
static void assert_priolists(struct intel_engine_execlists * const execlists,
long queue_priority)
{
struct rb_node *rb;
long last_prio, i;
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return;
GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
rb_first(&execlists->queue.rb_root));
last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1;
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
const struct i915_priolist *p = to_priolist(rb);
GEM_BUG_ON(p->priority >= last_prio);
last_prio = p->priority;
GEM_BUG_ON(!p->used);
for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
if (list_empty(&p->requests[i]))
continue;
GEM_BUG_ON(!(p->used & BIT(i)));
}
}
}
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_priolist *p;
struct rb_node **parent, *rb;
bool first = true;
int idx, i;
lockdep_assert_held(&engine->timeline.lock);
assert_priolists(execlists, INT_MAX);
/* buckets sorted from highest [in slot 0] to lowest priority */
idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
prio >>= I915_USER_PRIORITY_SHIFT;
if (unlikely(execlists->no_priolist))
prio = I915_PRIORITY_NORMAL;
find_priolist:
/* most positive priority is scheduled first, equal priorities fifo */
rb = NULL;
parent = &execlists->queue.rb_root.rb_node;
while (*parent) {
rb = *parent;
p = to_priolist(rb);
if (prio > p->priority) {
parent = &rb->rb_left;
} else if (prio < p->priority) {
parent = &rb->rb_right;
first = false;
} else {
goto out;
}
}
if (prio == I915_PRIORITY_NORMAL) {
p = &execlists->default_priolist;
} else {
p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
/* Convert an allocation failure to a priority bump */
if (unlikely(!p)) {
prio = I915_PRIORITY_NORMAL; /* recurses just once */
/* To maintain ordering with all rendering, after an
* allocation failure we have to disable all scheduling.
* Requests will then be executed in fifo, and schedule
* will ensure that dependencies are emitted in fifo.
* There will still be some reordering with existing
* requests, so if userspace lied about their
* dependencies that reordering may be visible.
*/
execlists->no_priolist = true;
goto find_priolist;
}
}
p->priority = prio;
for (i = 0; i < ARRAY_SIZE(p->requests); i++)
INIT_LIST_HEAD(&p->requests[i]);
rb_link_node(&p->node, rb, parent);
rb_insert_color_cached(&p->node, &execlists->queue, first);
p->used = 0;
out:
p->used |= BIT(idx);
return &p->requests[idx];
}
static struct intel_engine_cs *
sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
{
struct intel_engine_cs *engine = node_to_request(node)->engine;
GEM_BUG_ON(!locked);
if (engine != locked) {
spin_unlock(&locked->timeline.lock);
spin_lock(&engine->timeline.lock);
}
return engine;
}
static void __i915_schedule(struct i915_request *rq,
const struct i915_sched_attr *attr)
{
struct list_head *uninitialized_var(pl);
struct intel_engine_cs *engine, *last;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
const int prio = attr->priority;
LIST_HEAD(dfs);
/* Needed in order to use the temporary link inside i915_dependency */
lockdep_assert_held(&schedule_lock);
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
if (i915_request_completed(rq))
return;
if (prio <= READ_ONCE(rq->sched.attr.priority))
return;
stack.signaler = &rq->sched;
list_add(&stack.dfs_link, &dfs);
/*
* Recursively bump all dependent priorities to match the new request.
*
* A naive approach would be to use recursion:
* static void update_priorities(struct i915_sched_node *node, prio) {
* list_for_each_entry(dep, &node->signalers_list, signal_link)
* update_priorities(dep->signal, prio)
* queue_request(node);
* }
* but that may have unlimited recursion depth and so runs a very
* real risk of overrunning the kernel stack. Instead, we build
* a flat list of all dependencies starting with the current request.
* As we walk the list of dependencies, we add all of its dependencies
* to the end of the list (this may include an already visited
* request) and continue to walk onwards onto the new dependencies. The
* end result is a topological list of requests in reverse order, the
* last element in the list is the request we must execute first.
*/
list_for_each_entry(dep, &dfs, dfs_link) {
struct i915_sched_node *node = dep->signaler;
/*
* Within an engine, there can be no cycle, but we may
* refer to the same dependency chain multiple times
* (redundant dependencies are not eliminated) and across
* engines.
*/
list_for_each_entry(p, &node->signalers_list, signal_link) {
GEM_BUG_ON(p == dep); /* no cycles! */
if (node_signaled(p->signaler))
continue;
GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
if (prio > READ_ONCE(p->signaler->attr.priority))
list_move_tail(&p->dfs_link, &dfs);
}
}
/*
* If we didn't need to bump any existing priorities, and we haven't
* yet submitted this request (i.e. there is no potential race with
* execlists_submit_request()), we can set our own priority and skip
* acquiring the engine locks.
*/
if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
GEM_BUG_ON(!list_empty(&rq->sched.link));
rq->sched.attr = *attr;
if (stack.dfs_link.next == stack.dfs_link.prev)
return;
__list_del_entry(&stack.dfs_link);
}
last = NULL;
engine = rq->engine;
spin_lock_irq(&engine->timeline.lock);
/* Fifo and depth-first replacement ensure our deps execute before us */
list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
struct i915_sched_node *node = dep->signaler;
INIT_LIST_HEAD(&dep->dfs_link);
engine = sched_lock_engine(node, engine);
/* Recheck after acquiring the engine->timeline.lock */
if (prio <= node->attr.priority || node_signaled(node))
continue;
node->attr.priority = prio;
if (!list_empty(&node->link)) {
if (last != engine) {
pl = i915_sched_lookup_priolist(engine, prio);
last = engine;
}
list_move_tail(&node->link, pl);
} else {
/*
* If the request is not in the priolist queue because
* it is not yet runnable, then it doesn't contribute
* to our preemption decisions. On the other hand,
* if the request is on the HW, it too is not in the
* queue; but in that case we may still need to reorder
* the inflight requests.
*/
if (!i915_sw_fence_done(&node_to_request(node)->submit))
continue;
}
if (prio <= engine->execlists.queue_priority)
continue;
/*
* If we are already the currently executing context, don't
* bother evaluating if we should preempt ourselves.
*/
if (node_to_request(node)->global_seqno &&
i915_seqno_passed(port_request(engine->execlists.port)->global_seqno,
node_to_request(node)->global_seqno))
continue;
/* Defer (tasklet) submission until after all of our updates. */
engine->execlists.queue_priority = prio;
tasklet_hi_schedule(&engine->execlists.tasklet);
}
spin_unlock_irq(&engine->timeline.lock);
}
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
spin_lock(&schedule_lock);
__i915_schedule(rq, attr);
spin_unlock(&schedule_lock);
}
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
struct i915_sched_attr attr;
GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
return;
spin_lock_bh(&schedule_lock);
attr = rq->sched.attr;
attr.priority |= bump;
__i915_schedule(rq, &attr);
spin_unlock_bh(&schedule_lock);
}

View file

@ -8,9 +8,14 @@
#define _I915_SCHEDULER_H_
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <uapi/drm/i915_drm.h>
struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
enum {
I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
@ -19,6 +24,15 @@ enum {
I915_PRIORITY_INVALID = INT_MIN
};
#define I915_USER_PRIORITY_SHIFT 2
#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
#define I915_PRIORITY_WAIT ((u8)BIT(0))
#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1))
struct i915_sched_attr {
/**
* @priority: execution and service priority
@ -69,4 +83,26 @@ struct i915_dependency {
#define I915_DEPENDENCY_ALLOC BIT(0)
};
void i915_sched_node_init(struct i915_sched_node *node);
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal,
struct i915_dependency *dep,
unsigned long flags);
int i915_sched_node_add_dependency(struct drm_i915_private *i915,
struct i915_sched_node *node,
struct i915_sched_node *signal);
void i915_sched_node_fini(struct drm_i915_private *i915,
struct i915_sched_node *node);
void i915_schedule(struct i915_request *request,
const struct i915_sched_attr *attr);
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
#endif /* _I915_SCHEDULER_H_ */
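The new macros pack the user-visible context priority into the high bits (shifted left by I915_USER_PRIORITY_SHIFT) and reserve the two low bits for internal bumps such as I915_PRIORITY_WAIT and I915_PRIORITY_NEWCLIENT, which is what i915_sched_lookup_priolist() above consumes to pick a request bucket. A small worked example under these definitions; the helper below is invented purely for illustration:

/* I915_USER_PRIORITY_SHIFT = 2, I915_PRIORITY_COUNT = 4, I915_PRIORITY_MASK = 3 */
static inline int example_bucket_idx(int effective_prio)
{
	/* Same computation as i915_sched_lookup_priolist(): bucket 0 holds
	 * the most-boosted requests at a given user priority. */
	return 4 /* I915_PRIORITY_COUNT */ - (effective_prio & 3) - 1;
}

/*
 * user prio 0, no bump:                 prio = (0 << 2) | 0 -> bucket 3
 * user prio 0, NEWCLIENT bump (BIT(1)): prio = (0 << 2) | 2 -> bucket 1
 * user prio 1, no bump:                 prio = (1 << 2) | 0 -> different
 *                                       rbtree node (prio >> 2 == 1), bucket 3
 * Bumps only reorder requests that share the same user priority; they can
 * never promote a request above one with a higher user priority.
 */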

View file

@ -92,7 +92,7 @@ void i915_syncmap_init(struct i915_syncmap **root)
{
BUILD_BUG_ON_NOT_POWER_OF_2(KSYNCMAP);
BUILD_BUG_ON_NOT_POWER_OF_2(SHIFT);
BUILD_BUG_ON(KSYNCMAP > BITS_PER_BYTE * sizeof((*root)->bitmap));
BUILD_BUG_ON(KSYNCMAP > BITS_PER_TYPE((*root)->bitmap));
*root = NULL;
}

View file

@ -83,6 +83,25 @@ void i915_timeline_init(struct drm_i915_private *i915,
const char *name);
void i915_timeline_fini(struct i915_timeline *tl);
static inline void
i915_timeline_set_subclass(struct i915_timeline *timeline,
unsigned int subclass)
{
lockdep_set_subclass(&timeline->lock, subclass);
/*
* Due to an interesting quirk in lockdep's internal debug tracking,
* after setting a subclass we must ensure the lock is used. Otherwise,
* nr_unused_locks is incremented once too often.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
local_irq_disable();
lock_map_acquire(&timeline->lock.dep_map);
lock_map_release(&timeline->lock.dep_map);
local_irq_enable();
#endif
}
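i915_timeline_set_subclass() exists so timelines that are locked at different nesting depths (for example an engine-owned timeline taken inside a client timeline) get distinct lockdep classes and do not trigger false recursive-locking reports. A hedged usage sketch; the subclass enumerators below are invented for illustration and may not match the driver's actual names:

enum example_timeline_subclass {
	EXAMPLE_TIMELINE_CLIENT = 0,	/* default subclass */
	EXAMPLE_TIMELINE_ENGINE,
};

/* Hypothetical caller: give engine timelines their own lockdep class so
 * nesting them under a client timeline lock is not flagged as a deadlock. */
i915_timeline_init(i915, &engine->timeline, engine->name);
i915_timeline_set_subclass(&engine->timeline, EXAMPLE_TIMELINE_ENGINE);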
struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915, const char *name);

View file

@ -68,7 +68,7 @@
/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
(sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
#define ptr_mask_bits(ptr, n) ({ \
unsigned long __v = (unsigned long)(ptr); \
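Both this hunk and the i915_syncmap one above replace an open-coded sizeof(x) * BITS_PER_BYTE with BITS_PER_TYPE(). A one-line sketch of what that macro presumably expands to, stated as an assumption based on the usual kernel helper rather than quoted from this series:

/* Presumed definition of the kernel helper: */
#define BITS_PER_TYPE(T) (sizeof(T) * BITS_PER_BYTE)

/* e.g. BITS_PER_TYPE(u32) == 32; BITS_PER_TYPE((*root)->bitmap) is the number
 * of bits available in the syncmap bitmap word. */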

View file

@ -305,12 +305,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->size > vma->node.size);
if (GEM_WARN_ON(range_overflows(vma->node.start,
vma->node.size,
vma->vm->total)))
if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
vma->node.size,
vma->vm->total)))
return -ENODEV;
if (GEM_WARN_ON(!flags))
if (GEM_DEBUG_WARN_ON(!flags))
return -EINVAL;
bind_flags = 0;

View file

@ -25,8 +25,153 @@
* Jani Nikula <jani.nikula@intel.com>
*/
#include <drm/drm_mipi_dsi.h>
#include "intel_dsi.h"
static inline int header_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
{
return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
>> FREE_HEADER_CREDIT_SHIFT;
}
static inline int payload_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
{
return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
>> FREE_PLOAD_CREDIT_SHIFT;
}
static void wait_for_header_credits(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
{
if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
MAX_HEADER_CREDIT, 100))
DRM_ERROR("DSI header credits not released\n");
}
static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
{
if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
MAX_PLOAD_CREDIT, 100))
DRM_ERROR("DSI payload credits not released\n");
}
static enum transcoder dsi_port_to_transcoder(enum port port)
{
if (port == PORT_A)
return TRANSCODER_DSI_0;
else
return TRANSCODER_DSI_1;
}
static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct mipi_dsi_device *dsi;
enum port port;
enum transcoder dsi_trans;
int ret;
/* wait for header/payload credits to be released */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
wait_for_header_credits(dev_priv, dsi_trans);
wait_for_payload_credits(dev_priv, dsi_trans);
}
/* send nop DCS command */
for_each_dsi_port(port, intel_dsi->ports) {
dsi = intel_dsi->dsi_hosts[port]->device;
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
dsi->channel = 0;
ret = mipi_dsi_dcs_nop(dsi);
if (ret < 0)
DRM_ERROR("error sending DCS NOP command\n");
}
/* wait for header credits to be released */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
wait_for_header_credits(dev_priv, dsi_trans);
}
/* wait for LP TX in progress bit to be cleared */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
if (wait_for_us(!(I915_READ(DSI_LP_MSG(dsi_trans)) &
LPTX_IN_PROGRESS), 20))
DRM_ERROR("LPTX bit not cleared\n");
}
}
static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 tmp;
int lane;
for_each_dsi_port(port, intel_dsi->ports) {
/*
* Program voltage swing and pre-emphasis level values as per
* table in BSPEC under DDI buffer programming
*/
tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
tmp |= RTERM_SELECT(0x6);
I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
tmp |= RTERM_SELECT(0x6);
I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
tmp |= SWING_SEL_LOWER(0x2);
tmp |= RCOMP_SCALAR(0x98);
I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
tmp |= SWING_SEL_LOWER(0x2);
tmp |= RCOMP_SCALAR(0x98);
I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
for (lane = 0; lane <= 3; lane++) {
/* Bspec: must not use GRP register for write */
tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
}
}
}
static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@ -105,10 +250,553 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
}
}
static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder)
static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 tmp;
int lane;
/* Step 4b(i) set loadgen select for transmit and aux lanes */
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
tmp &= ~LOADGEN_SELECT;
I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
for (lane = 0; lane <= 3; lane++) {
tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
tmp &= ~LOADGEN_SELECT;
if (lane != 2)
tmp |= LOADGEN_SELECT;
I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
}
}
/* Step 4b(ii) set latency optimization for transmit and aux lanes */
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
}
}
static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 tmp;
enum port port;
/* clear common keeper enable bit */
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
tmp &= ~COMMON_KEEPER_EN;
I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp);
tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port));
tmp &= ~COMMON_KEEPER_EN;
I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp);
}
/*
* Set SUS Clock Config bitfield to 11b
* Note: loadgen select programming is done
* as part of lane phy sequence configuration
*/
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(ICL_PORT_CL_DW5(port));
tmp |= SUS_CLOCK_CONFIG;
I915_WRITE(ICL_PORT_CL_DW5(port), tmp);
}
/* Clear training enable to change swing values */
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
tmp &= ~TX_TRAINING_EN;
I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
tmp &= ~TX_TRAINING_EN;
I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
}
/* Program swing and de-emphasis */
dsi_program_swing_and_deemphasis(encoder);
/* Set training enable to trigger update */
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
tmp |= TX_TRAINING_EN;
I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
tmp |= TX_TRAINING_EN;
I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
}
}
static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 tmp;
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(DDI_BUF_CTL(port));
tmp |= DDI_BUF_CTL_ENABLE;
I915_WRITE(DDI_BUF_CTL(port), tmp);
if (wait_for_us(!(I915_READ(DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
500))
DRM_ERROR("DDI port:%c buffer idle\n", port_name(port));
}
}
static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 tmp;
enum port port;
/* Program T-INIT master registers */
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(ICL_DSI_T_INIT_MASTER(port));
tmp &= ~MASTER_INIT_TIMER_MASK;
tmp |= intel_dsi->init_count;
I915_WRITE(ICL_DSI_T_INIT_MASTER(port), tmp);
}
/* Program DPHY clock lanes timings */
for_each_dsi_port(port, intel_dsi->ports) {
I915_WRITE(DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
/* shadow register inside display core */
I915_WRITE(DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
}
/* Program DPHY data lanes timings */
for_each_dsi_port(port, intel_dsi->ports) {
I915_WRITE(DPHY_DATA_TIMING_PARAM(port),
intel_dsi->dphy_data_lane_reg);
/* shadow register inside display core */
I915_WRITE(DSI_DATA_TIMING_PARAM(port),
intel_dsi->dphy_data_lane_reg);
}
/*
* If the DSI link is operating at or below 800 MHz,
* TA_SURE should be overridden and programmed to
* a value of '0' inside TA_PARAM_REGISTERS; otherwise
* leave all fields at HW default values.
*/
if (intel_dsi_bitrate(intel_dsi) <= 800000) {
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
tmp &= ~TA_SURE_MASK;
tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
/* shadow register inside display core */
tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
tmp &= ~TA_SURE_MASK;
tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
}
}
}
static void
gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
enum pipe pipe = intel_crtc->pipe;
u32 tmp;
enum port port;
enum transcoder dsi_trans;
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
tmp = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
if (intel_dsi->eotp_pkt)
tmp &= ~EOTP_DISABLED;
else
tmp |= EOTP_DISABLED;
/* enable link calibration if freq > 1.5Gbps */
if (intel_dsi_bitrate(intel_dsi) >= 1500 * 1000) {
tmp &= ~LINK_CALIBRATION_MASK;
tmp |= CALIBRATION_ENABLED_INITIAL_ONLY;
}
/* configure continuous clock */
tmp &= ~CONTINUOUS_CLK_MASK;
if (intel_dsi->clock_stop)
tmp |= CLK_ENTER_LP_AFTER_DATA;
else
tmp |= CLK_HS_CONTINUOUS;
/* configure buffer threshold limit to minimum */
tmp &= ~PIX_BUF_THRESHOLD_MASK;
tmp |= PIX_BUF_THRESHOLD_1_4;
/* set virtual channel to '0' */
tmp &= ~PIX_VIRT_CHAN_MASK;
tmp |= PIX_VIRT_CHAN(0);
/* program BGR transmission */
if (intel_dsi->bgr_enabled)
tmp |= BGR_TRANSMISSION;
/* select pixel format */
tmp &= ~PIX_FMT_MASK;
switch (intel_dsi->pixel_format) {
default:
MISSING_CASE(intel_dsi->pixel_format);
/* fallthrough */
case MIPI_DSI_FMT_RGB565:
tmp |= PIX_FMT_RGB565;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
tmp |= PIX_FMT_RGB666_PACKED;
break;
case MIPI_DSI_FMT_RGB666:
tmp |= PIX_FMT_RGB666_LOOSE;
break;
case MIPI_DSI_FMT_RGB888:
tmp |= PIX_FMT_RGB888;
break;
}
/* program DSI operation mode */
if (is_vid_mode(intel_dsi)) {
tmp &= ~OP_MODE_MASK;
switch (intel_dsi->video_mode_format) {
default:
MISSING_CASE(intel_dsi->video_mode_format);
/* fallthrough */
case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS:
tmp |= VIDEO_MODE_SYNC_EVENT;
break;
case VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE:
tmp |= VIDEO_MODE_SYNC_PULSE;
break;
}
}
I915_WRITE(DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
}
/* enable port sync mode if dual link */
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
tmp |= PORT_SYNC_MODE_ENABLE;
I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
}
//TODO: configure DSS_CTL1
}
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
/* select data lane width */
tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
tmp &= ~DDI_PORT_WIDTH_MASK;
tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
/* select input pipe */
tmp &= ~TRANS_DDI_EDP_INPUT_MASK;
switch (pipe) {
default:
MISSING_CASE(pipe);
/* fallthrough */
case PIPE_A:
tmp |= TRANS_DDI_EDP_INPUT_A_ON;
break;
case PIPE_B:
tmp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
break;
case PIPE_C:
tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
break;
}
/* enable DDI buffer */
tmp |= TRANS_DDI_FUNC_ENABLE;
I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
}
/* wait for link ready */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
if (wait_for_us((I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)) &
LINK_READY), 2500))
DRM_ERROR("DSI link not ready\n");
}
}
static void
gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
const struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
enum port port;
enum transcoder dsi_trans;
/* horizontal timings */
u16 htotal, hactive, hsync_start, hsync_end, hsync_size;
u16 hfront_porch, hback_porch;
/* vertical timings */
u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift;
hactive = adjusted_mode->crtc_hdisplay;
htotal = adjusted_mode->crtc_htotal;
hsync_start = adjusted_mode->crtc_hsync_start;
hsync_end = adjusted_mode->crtc_hsync_end;
hsync_size = hsync_end - hsync_start;
hfront_porch = (adjusted_mode->crtc_hsync_start -
adjusted_mode->crtc_hdisplay);
hback_porch = (adjusted_mode->crtc_htotal -
adjusted_mode->crtc_hsync_end);
vactive = adjusted_mode->crtc_vdisplay;
vtotal = adjusted_mode->crtc_vtotal;
vsync_start = adjusted_mode->crtc_vsync_start;
vsync_end = adjusted_mode->crtc_vsync_end;
vsync_shift = hsync_start - htotal / 2;
if (intel_dsi->dual_link) {
hactive /= 2;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
hactive += intel_dsi->pixel_overlap;
htotal /= 2;
}
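/*
 * Worked example (numbers assumed for illustration): a 1920-pixel-wide mode
 * on a front/back dual link with pixel_overlap = 4 programs each DSI
 * transcoder with hactive = 1920 / 2 + 4 = 964, and htotal is halved the
 * same way.
 */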
/* minimum hactive as per bspec: 256 pixels */
if (adjusted_mode->crtc_hdisplay < 256)
DRM_ERROR("hactive is less then 256 pixels\n");
/* if RGB666 format, then hactive must be multiple of 4 pixels */
if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0)
DRM_ERROR("hactive pixels are not multiple of 4\n");
/* program TRANS_HTOTAL register */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
I915_WRITE(HTOTAL(dsi_trans),
(hactive - 1) | ((htotal - 1) << 16));
}
/* TRANS_HSYNC register to be programmed only for video mode */
if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
if (intel_dsi->video_mode_format ==
VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
/* BSPEC: hsync size should be at least 16 pixels */
if (hsync_size < 16)
DRM_ERROR("hsync size < 16 pixels\n");
}
if (hback_porch < 16)
DRM_ERROR("hback porch < 16 pixels\n");
if (intel_dsi->dual_link) {
hsync_start /= 2;
hsync_end /= 2;
}
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
I915_WRITE(HSYNC(dsi_trans),
(hsync_start - 1) | ((hsync_end - 1) << 16));
}
}
/* program TRANS_VTOTAL register */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
/*
* FIXME: Programming this by assuming progressive mode, since
* non-interlaced info from VBT is not saved inside
* struct drm_display_mode.
* For interlace mode: program required pixel minus 2
*/
I915_WRITE(VTOTAL(dsi_trans),
(vactive - 1) | ((vtotal - 1) << 16));
}
if (vsync_end < vsync_start || vsync_end > vtotal)
DRM_ERROR("Invalid vsync_end value\n");
if (vsync_start < vactive)
DRM_ERROR("vsync_start less than vactive\n");
/* program TRANS_VSYNC register */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
I915_WRITE(VSYNC(dsi_trans),
(vsync_start - 1) | ((vsync_end - 1) << 16));
}
/*
* FIXME: It has to be programmed only for interlaced
* modes. Put the check condition here once interlace
* info is available as described above.
* program TRANS_VSYNCSHIFT register
*/
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift);
}
}
static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
enum transcoder dsi_trans;
u32 tmp;
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
tmp = I915_READ(PIPECONF(dsi_trans));
tmp |= PIPECONF_ENABLE;
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be enabled */
if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE,
I965_PIPECONF_ACTIVE, 10))
DRM_ERROR("DSI transcoder not enabled\n");
}
}
static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
enum transcoder dsi_trans;
u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
/*
* escape clock count calculation:
* BYTE_CLK_COUNT = TIME_NS/(8 * UI)
* UI (nsec) = (10^6)/Bitrate
* TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6)/ Bitrate
* ESCAPE_CLK_COUNT = TIME_NS/ESC_CLK_NS
*/
divisor = intel_dsi_tlpx_ns(intel_dsi) * intel_dsi_bitrate(intel_dsi) * 1000;
mul = 8 * 1000000;
hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul,
divisor);
lp_rx_timeout = DIV_ROUND_UP(intel_dsi->lp_rx_timeout * mul, divisor);
ta_timeout = DIV_ROUND_UP(intel_dsi->turn_arnd_val * mul, divisor);
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
/* program hst_tx_timeout */
tmp = I915_READ(DSI_HSTX_TO(dsi_trans));
tmp &= ~HSTX_TIMEOUT_VALUE_MASK;
tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout);
I915_WRITE(DSI_HSTX_TO(dsi_trans), tmp);
/* FIXME: DSI_CALIB_TO */
/* program lp_rx_host timeout */
tmp = I915_READ(DSI_LPRX_HOST_TO(dsi_trans));
tmp &= ~LPRX_TIMEOUT_VALUE_MASK;
tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout);
I915_WRITE(DSI_LPRX_HOST_TO(dsi_trans), tmp);
/* FIXME: DSI_PWAIT_TO */
/* program turn around timeout */
tmp = I915_READ(DSI_TA_TO(dsi_trans));
tmp &= ~TA_TIMEOUT_VALUE_MASK;
tmp |= TA_TIMEOUT_VALUE(ta_timeout);
I915_WRITE(DSI_TA_TO(dsi_trans), tmp);
}
}
static void
gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
/* step 4a: power up all lanes of the DDI used by DSI */
gen11_dsi_power_up_lanes(encoder);
/* step 4b: configure lane sequencing of the Combo-PHY transmitters */
gen11_dsi_config_phy_lanes_sequence(encoder);
/* step 4c: configure voltage swing and skew */
gen11_dsi_voltage_swing_program_seq(encoder);
/* enable DDI buffer */
gen11_dsi_enable_ddi_buffer(encoder);
/* setup D-PHY timings */
gen11_dsi_setup_dphy_timings(encoder);
/* step 4h: setup DSI protocol timeouts */
gen11_dsi_setup_timeouts(encoder);
/* Step (4h, 4i, 4j, 4k): Configure transcoder */
gen11_dsi_configure_transcoder(encoder, pipe_config);
}
static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct mipi_dsi_device *dsi;
enum port port;
enum transcoder dsi_trans;
u32 tmp;
int ret;
/* set maximum return packet size */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
/*
* FIXME: This uses the number of DW's currently in the payload
* receive queue. This is probably not what we want here.
*/
tmp = I915_READ(DSI_CMD_RXCTL(dsi_trans));
tmp &= NUMBER_RX_PLOAD_DW_MASK;
/* multiply "Number Rx Payload DW" by 4 to get max value */
tmp = tmp * 4;
dsi = intel_dsi->dsi_hosts[port]->device;
ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp);
if (ret < 0)
DRM_ERROR("error setting max return pkt size%d\n", tmp);
}
/* panel power on related mipi dsi vbt sequences */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
/* ensure all panel commands dispatched before enabling transcoder */
wait_for_cmds_dispatched_to_panel(encoder);
}
static void __attribute__((unused))
@ -116,6 +804,8 @@ gen11_dsi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
/* step2: enable IO power */
gen11_dsi_enable_io_power(encoder);
@ -123,5 +813,169 @@ gen11_dsi_pre_enable(struct intel_encoder *encoder,
gen11_dsi_program_esc_clk_div(encoder);
/* step4: enable DSI port and DPHY */
gen11_dsi_enable_port_and_phy(encoder);
gen11_dsi_enable_port_and_phy(encoder, pipe_config);
/* step5: program and powerup panel */
gen11_dsi_powerup_panel(encoder);
/* step6c: configure transcoder timings */
gen11_dsi_set_transcoder_timings(encoder, pipe_config);
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
/* step7: enable backlight */
intel_panel_enable_backlight(pipe_config, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
}
static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
enum transcoder dsi_trans;
u32 tmp;
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
/* disable transcoder */
tmp = I915_READ(PIPECONF(dsi_trans));
tmp &= ~PIPECONF_ENABLE;
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be disabled */
if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE, 0, 50))
DRM_ERROR("DSI trancoder not disabled\n");
}
}
static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
/* ensure cmds dispatched to panel */
wait_for_cmds_dispatched_to_panel(encoder);
}
static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
enum transcoder dsi_trans;
u32 tmp;
/* put dsi link in ULPS */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
tmp = I915_READ(DSI_LP_MSG(dsi_trans));
tmp |= LINK_ENTER_ULPS;
tmp &= ~LINK_ULPS_TYPE_LP11;
I915_WRITE(DSI_LP_MSG(dsi_trans), tmp);
if (wait_for_us((I915_READ(DSI_LP_MSG(dsi_trans)) &
LINK_IN_ULPS),
10))
DRM_ERROR("DSI link not in ULPS\n");
}
/* disable ddi function */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
tmp &= ~TRANS_DDI_FUNC_ENABLE;
I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
}
/* disable port sync mode if dual link */
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
tmp &= ~PORT_SYNC_MODE_ENABLE;
I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
}
}
}
static void gen11_dsi_disable_port(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 tmp;
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(DDI_BUF_CTL(port));
tmp &= ~DDI_BUF_CTL_ENABLE;
I915_WRITE(DDI_BUF_CTL(port), tmp);
if (wait_for_us((I915_READ(DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
8))
DRM_ERROR("DDI port:%c buffer not idle\n",
port_name(port));
}
}
static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 tmp;
intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO);
if (intel_dsi->dual_link)
intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_B_IO);
/* set mode to DDI */
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
tmp &= ~COMBO_PHY_MODE_DSI;
I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
}
}
static void __attribute__((unused)) gen11_dsi_disable(
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
/* step1: turn off backlight */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_panel_disable_backlight(old_conn_state);
/* step2d,e: disable transcoder and wait */
gen11_dsi_disable_transcoder(encoder);
/* step2f,g: powerdown panel */
gen11_dsi_powerdown_panel(encoder);
/* step2h,i,j: deconfigure transcoder */
gen11_dsi_deconfigure_trancoder(encoder);
/* step3: disable port */
gen11_dsi_disable_port(encoder);
/* step4: disable IO power */
gen11_dsi_disable_io_power(encoder);
}
void icl_dsi_init(struct drm_i915_private *dev_priv)
{
enum port port;
if (!intel_bios_is_dsi_present(dev_priv, &port))
return;
}

View file

@ -203,6 +203,72 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
drm_atomic_helper_crtc_destroy_state(crtc, state);
}
static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
int num_scalers_need, struct intel_crtc *intel_crtc,
const char *name, int idx,
struct intel_plane_state *plane_state,
int *scaler_id)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
int j;
u32 mode;
if (*scaler_id < 0) {
/* find a free scaler */
for (j = 0; j < intel_crtc->num_scalers; j++) {
if (scaler_state->scalers[j].in_use)
continue;
*scaler_id = j;
scaler_state->scalers[*scaler_id].in_use = 1;
break;
}
}
if (WARN(*scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx))
return;
/* set scaler mode */
if (plane_state && plane_state->base.fb &&
plane_state->base.fb->format->is_yuv &&
plane_state->base.fb->format->num_planes > 1) {
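/*
 * Planar YUV needs a scaler mode that knows about the UV plane: pre-GLK
 * gen9 has a dedicated NV12 mode, while newer non-HDR planes use planar
 * mode with an explicit Y-plane selection below.
 */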
if (IS_GEN9(dev_priv) &&
!IS_GEMINILAKE(dev_priv)) {
mode = SKL_PS_SCALER_MODE_NV12;
} else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
/*
* On gen11+'s HDR planes we only use the scaler for
* scaling. They have a dedicated chroma upsampler, so
* we don't need the scaler to upsample the UV plane.
*/
mode = PS_SCALER_MODE_NORMAL;
} else {
mode = PS_SCALER_MODE_PLANAR;
if (plane_state->linked_plane)
mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id);
}
} else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
mode = PS_SCALER_MODE_NORMAL;
} else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
/*
* when only 1 scaler is in use on a pipe with 2 scalers
* scaler 0 operates in high quality (HQ) mode.
* In this case use scaler 0 to take advantage of HQ mode
*/
scaler_state->scalers[*scaler_id].in_use = 0;
*scaler_id = 0;
scaler_state->scalers[0].in_use = 1;
mode = SKL_PS_SCALER_MODE_HQ;
} else {
mode = SKL_PS_SCALER_MODE_DYN;
}
DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
intel_crtc->pipe, *scaler_id, name, idx);
scaler_state->scalers[*scaler_id].mode = mode;
}
/**
* intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
* @dev_priv: i915 device
@ -232,7 +298,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct drm_atomic_state *drm_state = crtc_state->base.state;
struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
int num_scalers_need;
int i, j;
int i;
num_scalers_need = hweight32(scaler_state->scaler_users);
@ -304,59 +370,17 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
idx = plane->base.id;
/* plane on different crtc cannot be a scaler user of this crtc */
if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
continue;
}
plane_state = intel_atomic_get_new_plane_state(intel_state,
intel_plane);
scaler_id = &plane_state->scaler_id;
}
if (*scaler_id < 0) {
/* find a free scaler */
for (j = 0; j < intel_crtc->num_scalers; j++) {
if (!scaler_state->scalers[j].in_use) {
scaler_state->scalers[j].in_use = 1;
*scaler_id = j;
DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
intel_crtc->pipe, *scaler_id, name, idx);
break;
}
}
}
if (WARN_ON(*scaler_id < 0)) {
DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
continue;
}
/* set scaler mode */
if ((INTEL_GEN(dev_priv) >= 9) &&
plane_state && plane_state->base.fb &&
plane_state->base.fb->format->format ==
DRM_FORMAT_NV12) {
if (INTEL_GEN(dev_priv) == 9 &&
!IS_GEMINILAKE(dev_priv) &&
!IS_SKYLAKE(dev_priv))
scaler_state->scalers[*scaler_id].mode =
SKL_PS_SCALER_MODE_NV12;
else
scaler_state->scalers[*scaler_id].mode =
PS_SCALER_MODE_PLANAR;
} else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
/*
* when only 1 scaler is in use on either pipe A or B,
* scaler 0 operates in high quality (HQ) mode.
* In this case use scaler 0 to take advantage of HQ mode
*/
*scaler_id = 0;
scaler_state->scalers[0].in_use = 1;
scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
scaler_state->scalers[1].in_use = 0;
} else {
scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
}
intel_atomic_setup_scaler(scaler_state, num_scalers_need,
intel_crtc, name, idx,
plane_state, scaler_id);
}
return 0;

View file

@ -36,28 +36,31 @@
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
/**
* intel_create_plane_state - create plane state object
* @plane: drm plane
*
* Allocates a fresh plane state for the given plane and sets some of
* the state values to sensible initial values.
*
* Returns: A newly allocated plane state, or NULL on failure
*/
struct intel_plane_state *
intel_create_plane_state(struct drm_plane *plane)
struct intel_plane *intel_plane_alloc(void)
{
struct intel_plane_state *state;
struct intel_plane_state *plane_state;
struct intel_plane *plane;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
plane = kzalloc(sizeof(*plane), GFP_KERNEL);
if (!plane)
return ERR_PTR(-ENOMEM);
state->base.plane = plane;
state->base.rotation = DRM_MODE_ROTATE_0;
plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
if (!plane_state) {
kfree(plane);
return ERR_PTR(-ENOMEM);
}
return state;
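/*
 * Tie the fresh state to the plane and reset it to atomic defaults;
 * no scaler is assigned yet.
 */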
__drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
plane_state->scaler_id = -1;
return plane;
}
void intel_plane_free(struct intel_plane *plane)
{
intel_plane_destroy_state(&plane->base, plane->base.state);
kfree(plane);
}
/**
@ -117,10 +120,14 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_plane *intel_plane = to_intel_plane(plane);
int ret;
crtc_state->active_planes &= ~BIT(intel_plane->id);
crtc_state->nv12_planes &= ~BIT(intel_plane->id);
intel_state->base.visible = false;
/* If this is a cursor plane, no further checks are needed. */
if (!intel_state->base.crtc && !old_plane_state->base.crtc)
return 0;
intel_state->base.visible = false;
ret = intel_plane->check_plane(crtc_state, intel_state);
if (ret)
return ret;
@ -128,13 +135,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
/* FIXME pre-g4x don't work like this */
if (state->visible)
crtc_state->active_planes |= BIT(intel_plane->id);
else
crtc_state->active_planes &= ~BIT(intel_plane->id);
if (state->visible && state->fb->format->format == DRM_FORMAT_NV12)
crtc_state->nv12_planes |= BIT(intel_plane->id);
else
crtc_state->nv12_planes &= ~BIT(intel_plane->id);
return intel_plane_atomic_calc_changes(old_crtc_state,
&crtc_state->base,
@ -152,6 +155,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
const struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
new_plane_state->visible = false;
if (!crtc)
return 0;
@ -164,29 +168,52 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
to_intel_plane_state(new_plane_state));
}
static void intel_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
void intel_update_planes_on_crtc(struct intel_atomic_state *old_state,
struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
struct intel_atomic_state *state = to_intel_atomic_state(old_state->state);
struct intel_plane *intel_plane = to_intel_plane(plane);
const struct intel_plane_state *new_plane_state =
intel_atomic_get_new_plane_state(state, intel_plane);
struct drm_crtc *crtc = new_plane_state->base.crtc ?: old_state->crtc;
struct intel_plane_state *new_plane_state;
struct intel_plane *plane;
u32 update_mask;
int i;
if (new_plane_state->base.visible) {
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, to_intel_crtc(crtc));
update_mask = old_crtc_state->active_planes;
update_mask |= new_crtc_state->active_planes;
trace_intel_update_plane(plane,
to_intel_crtc(crtc));
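/* Walk every plane that is active either before or after this commit. */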
for_each_new_intel_plane_in_state(old_state, plane, new_plane_state, i) {
if (crtc->pipe != plane->pipe ||
!(update_mask & BIT(plane->id)))
continue;
intel_plane->update_plane(intel_plane,
new_crtc_state, new_plane_state);
} else {
trace_intel_disable_plane(plane,
to_intel_crtc(crtc));
if (new_plane_state->base.visible) {
trace_intel_update_plane(&plane->base, crtc);
intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
plane->update_plane(plane, new_crtc_state, new_plane_state);
} else if (new_plane_state->slave) {
struct intel_plane *master =
new_plane_state->linked_plane;
/*
* We update the slave plane from this function because
* programming it from the master plane's update_plane
* callback runs into issues when the Y plane is
* reassigned, disabled or used by a different plane.
*
* The slave plane is updated with the master plane's
* plane_state.
*/
new_plane_state =
intel_atomic_get_new_plane_state(old_state, master);
trace_intel_update_plane(&plane->base, crtc);
plane->update_slave(plane, new_crtc_state, new_plane_state);
} else {
trace_intel_disable_plane(&plane->base, crtc);
plane->disable_plane(plane, crtc);
}
}
}
@ -194,7 +221,6 @@ const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.prepare_fb = intel_prepare_plane_fb,
.cleanup_fb = intel_cleanup_plane_fb,
.atomic_check = intel_plane_atomic_check,
.atomic_update = intel_plane_atomic_update,
};
/**

View file

@ -153,32 +153,32 @@ static const struct {
int n;
int cts;
} hdmi_aud_ncts[] = {
{ 44100, TMDS_296M, 4459, 234375 },
{ 44100, TMDS_297M, 4704, 247500 },
{ 48000, TMDS_296M, 5824, 281250 },
{ 48000, TMDS_297M, 5120, 247500 },
{ 32000, TMDS_296M, 5824, 421875 },
{ 32000, TMDS_297M, 3072, 222750 },
{ 88200, TMDS_296M, 8918, 234375 },
{ 88200, TMDS_297M, 9408, 247500 },
{ 96000, TMDS_296M, 11648, 281250 },
{ 96000, TMDS_297M, 10240, 247500 },
{ 176400, TMDS_296M, 17836, 234375 },
{ 176400, TMDS_297M, 18816, 247500 },
{ 192000, TMDS_296M, 23296, 281250 },
{ 192000, TMDS_297M, 20480, 247500 },
{ 44100, TMDS_593M, 8918, 937500 },
{ 44100, TMDS_594M, 9408, 990000 },
{ 48000, TMDS_593M, 5824, 562500 },
{ 48000, TMDS_594M, 6144, 594000 },
{ 32000, TMDS_593M, 5824, 843750 },
{ 32000, TMDS_594M, 3072, 445500 },
{ 44100, TMDS_296M, 4459, 234375 },
{ 44100, TMDS_297M, 4704, 247500 },
{ 44100, TMDS_593M, 8918, 937500 },
{ 44100, TMDS_594M, 9408, 990000 },
{ 88200, TMDS_296M, 8918, 234375 },
{ 88200, TMDS_297M, 9408, 247500 },
{ 88200, TMDS_593M, 17836, 937500 },
{ 88200, TMDS_594M, 18816, 990000 },
{ 96000, TMDS_593M, 11648, 562500 },
{ 96000, TMDS_594M, 12288, 594000 },
{ 176400, TMDS_296M, 17836, 234375 },
{ 176400, TMDS_297M, 18816, 247500 },
{ 176400, TMDS_593M, 35672, 937500 },
{ 176400, TMDS_594M, 37632, 990000 },
{ 48000, TMDS_296M, 5824, 281250 },
{ 48000, TMDS_297M, 5120, 247500 },
{ 48000, TMDS_593M, 5824, 562500 },
{ 48000, TMDS_594M, 6144, 594000 },
{ 96000, TMDS_296M, 11648, 281250 },
{ 96000, TMDS_297M, 10240, 247500 },
{ 96000, TMDS_593M, 11648, 562500 },
{ 96000, TMDS_594M, 12288, 594000 },
{ 192000, TMDS_296M, 23296, 281250 },
{ 192000, TMDS_297M, 20480, 247500 },
{ 192000, TMDS_593M, 23296, 562500 },
{ 192000, TMDS_594M, 24576, 594000 },
};
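/*
 * The table above follows the HDMI relation 128 * f_audio = f_TMDS * N / CTS;
 * e.g. for 48 kHz audio on a 297 MHz link, CTS = 297000000 * 5120 /
 * (128 * 48000) = 247500.
 */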
@ -929,6 +929,9 @@ static int i915_audio_component_bind(struct device *i915_kdev,
if (WARN_ON(acomp->base.ops || acomp->base.dev))
return -EEXIST;
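/*
 * Create a stateless device link so i915 (supplier) is resumed before and
 * suspended after the HDA controller (consumer).
 */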
if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS)))
return -ENOMEM;
drm_modeset_lock_all(&dev_priv->drm);
acomp->base.ops = &i915_audio_component_ops;
acomp->base.dev = i915_kdev;
@ -952,6 +955,8 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
acomp->base.dev = NULL;
dev_priv->audio_component = NULL;
drm_modeset_unlock_all(&dev_priv->drm);
device_link_remove(hda_kdev, i915_kdev);
}
static const struct component_ops i915_audio_component_bind_ops = {

View file

@ -420,6 +420,13 @@ parse_general_features(struct drm_i915_private *dev_priv,
intel_bios_ssc_frequency(dev_priv, general->ssc_freq);
dev_priv->vbt.display_clock_mode = general->display_clock_mode;
dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
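/* The rotate_180 flag is only present from VBT version 181 onwards. */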
if (bdb->version >= 181) {
dev_priv->vbt.orientation = general->rotate_180 ?
DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
DRM_MODE_PANEL_ORIENTATION_NORMAL;
} else {
dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
}
DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
dev_priv->vbt.int_tv_support,
dev_priv->vbt.int_crt_support,
@ -852,6 +859,30 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
parse_dsi_backlight_ports(dev_priv, bdb->version, port);
/* FIXME is the 90 vs. 270 correct? */
switch (config->rotation) {
case ENABLE_ROTATION_0:
/*
* Most (all?) VBTs claim 0 degrees despite having
* an upside down panel, thus we do not trust this.
*/
dev_priv->vbt.dsi.orientation =
DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
break;
case ENABLE_ROTATION_90:
dev_priv->vbt.dsi.orientation =
DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
break;
case ENABLE_ROTATION_180:
dev_priv->vbt.dsi.orientation =
DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
break;
case ENABLE_ROTATION_270:
dev_priv->vbt.dsi.orientation =
DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
break;
}
/* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}
@ -2039,17 +2070,17 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
dvo_port = child->dvo_port;
switch (dvo_port) {
case DVO_PORT_MIPIA:
case DVO_PORT_MIPIC:
if (dvo_port == DVO_PORT_MIPIA ||
(dvo_port == DVO_PORT_MIPIB && IS_ICELAKE(dev_priv)) ||
(dvo_port == DVO_PORT_MIPIC && !IS_ICELAKE(dev_priv))) {
if (port)
*port = dvo_port - DVO_PORT_MIPIA;
return true;
case DVO_PORT_MIPIB:
case DVO_PORT_MIPID:
} else if (dvo_port == DVO_PORT_MIPIB ||
dvo_port == DVO_PORT_MIPIC ||
dvo_port == DVO_PORT_MIPID) {
DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
port_name(dvo_port - DVO_PORT_MIPIA));
break;
}
}
@ -2159,3 +2190,49 @@ intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
return false;
}
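/*
 * Map a port to the AUX channel it should use: honour the VBT's
 * alternate_aux_channel when set, otherwise fall back to the platform
 * default where the AUX channel matches the port.
 */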
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
enum port port)
{
const struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
enum aux_ch aux_ch;
if (!info->alternate_aux_channel) {
aux_ch = (enum aux_ch)port;
DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
aux_ch_name(aux_ch), port_name(port));
return aux_ch;
}
switch (info->alternate_aux_channel) {
case DP_AUX_A:
aux_ch = AUX_CH_A;
break;
case DP_AUX_B:
aux_ch = AUX_CH_B;
break;
case DP_AUX_C:
aux_ch = AUX_CH_C;
break;
case DP_AUX_D:
aux_ch = AUX_CH_D;
break;
case DP_AUX_E:
aux_ch = AUX_CH_E;
break;
case DP_AUX_F:
aux_ch = AUX_CH_F;
break;
default:
MISSING_CASE(info->alternate_aux_channel);
aux_ch = AUX_CH_A;
break;
}
DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
aux_ch_name(aux_ch), port_name(port));
return aux_ch;
}

View file

@ -2660,39 +2660,20 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
fraction = 200;
}
rawclk = CNP_RAWCLK_DIV((divider / 1000) - 1);
if (fraction)
rawclk |= CNP_RAWCLK_FRAC(DIV_ROUND_CLOSEST(1000,
fraction) - 1);
rawclk = CNP_RAWCLK_DIV(divider / 1000);
if (fraction) {
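/*
 * The fractional part is programmed as a numerator/denominator pair;
 * ICP additionally needs the numerator field set.
 */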
int numerator = 1;
rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
fraction) - 1);
if (HAS_PCH_ICP(dev_priv))
rawclk |= ICP_RAWCLK_NUM(numerator);
}
I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
return divider + fraction;
}
static int icp_rawclk(struct drm_i915_private *dev_priv)
{
u32 rawclk;
int divider, numerator, denominator, frequency;
if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
frequency = 24000;
divider = 23;
numerator = 0;
denominator = 0;
} else {
frequency = 19200;
divider = 18;
numerator = 1;
denominator = 4;
}
rawclk = CNP_RAWCLK_DIV(divider) | ICP_RAWCLK_NUM(numerator) |
ICP_RAWCLK_DEN(denominator);
I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
return frequency;
}
static int pch_rawclk(struct drm_i915_private *dev_priv)
{
return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
@ -2740,9 +2721,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
*/
void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_ICP(dev_priv))
dev_priv->rawclk_freq = icp_rawclk(dev_priv);
else if (HAS_PCH_CNP(dev_priv))
if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv))
dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
dev_priv->rawclk_freq = pch_rawclk(dev_priv);

View file

@ -149,7 +149,8 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
limited_color_range = intel_crtc_state->limited_color_range;
if (intel_crtc_state->ycbcr420) {
if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
ilk_load_ycbcr_conversion_matrix(intel_crtc);
return;
} else if (crtc_state->ctm) {

View file

@ -0,0 +1,254 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2018 Intel Corporation
*/
#include "intel_drv.h"
#define for_each_combo_port(__dev_priv, __port) \
for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
for_each_if(intel_port_is_combophy(__dev_priv, __port))
#define for_each_combo_port_reverse(__dev_priv, __port) \
for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) \
for_each_if(intel_port_is_combophy(__dev_priv, __port))
enum {
PROCMON_0_85V_DOT_0,
PROCMON_0_95V_DOT_0,
PROCMON_0_95V_DOT_1,
PROCMON_1_05V_DOT_0,
PROCMON_1_05V_DOT_1,
};
static const struct cnl_procmon {
u32 dw1, dw9, dw10;
} cnl_procmon_values[] = {
[PROCMON_0_85V_DOT_0] =
{ .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
[PROCMON_0_95V_DOT_0] =
{ .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
[PROCMON_0_95V_DOT_1] =
{ .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
[PROCMON_1_05V_DOT_0] =
{ .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
[PROCMON_1_05V_DOT_1] =
{ .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
};
/*
* CNL has just one set of registers, while ICL has two sets: one for port A and
* the other for port B. The CNL registers are equivalent to the ICL port A
* registers, which is why we use the ICL macros even though the function has CNL
* in its name.
*/
static const struct cnl_procmon *
cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
{
const struct cnl_procmon *procmon;
u32 val;
val = I915_READ(ICL_PORT_COMP_DW3(port));
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
/* fall through */
case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
break;
case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
break;
case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
break;
case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
break;
case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
break;
}
return procmon;
}
static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
enum port port)
{
const struct cnl_procmon *procmon;
u32 val;
procmon = cnl_get_procmon_ref_values(dev_priv, port);
val = I915_READ(ICL_PORT_COMP_DW1(port));
val &= ~((0xff << 16) | 0xff);
val |= procmon->dw1;
I915_WRITE(ICL_PORT_COMP_DW1(port), val);
I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
}
static bool check_phy_reg(struct drm_i915_private *dev_priv,
enum port port, i915_reg_t reg, u32 mask,
u32 expected_val)
{
u32 val = I915_READ(reg);
if ((val & mask) != expected_val) {
DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: "
"current %08x mask %08x expected %08x\n",
port_name(port),
reg.reg, val, mask, expected_val);
return false;
}
return true;
}
static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
enum port port)
{
const struct cnl_procmon *procmon;
bool ret;
procmon = cnl_get_procmon_ref_values(dev_priv, port);
ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port),
(0xff << 16) | 0xff, procmon->dw1);
ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port),
-1U, procmon->dw9);
ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port),
-1U, procmon->dw10);
return ret;
}
static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
{
return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
(I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT);
}
static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
{
enum port port = PORT_A;
bool ret;
if (!cnl_combo_phy_enabled(dev_priv))
return false;
ret = cnl_verify_procmon_ref_values(dev_priv, port);
ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5,
CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
return ret;
}
void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
{
u32 val;
val = I915_READ(CHICKEN_MISC_2);
val &= ~CNL_COMP_PWR_DOWN;
I915_WRITE(CHICKEN_MISC_2, val);
/* Dummy PORT_A to get the correct CNL register from the ICL macro */
cnl_set_procmon_ref_values(dev_priv, PORT_A);
val = I915_READ(CNL_PORT_COMP_DW0);
val |= COMP_INIT;
I915_WRITE(CNL_PORT_COMP_DW0, val);
val = I915_READ(CNL_PORT_CL1CM_DW5);
val |= CL_POWER_DOWN_ENABLE;
I915_WRITE(CNL_PORT_CL1CM_DW5, val);
}
void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
{
u32 val;
if (!cnl_combo_phy_verify_state(dev_priv))
DRM_WARN("Combo PHY HW state changed unexpectedly.\n");
val = I915_READ(CHICKEN_MISC_2);
val |= CNL_COMP_PWR_DOWN;
I915_WRITE(CHICKEN_MISC_2, val);
}
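/*
 * A combo PHY counts as enabled when its DE IO comp power-down override is
 * clear and COMP_INIT has been requested.
 */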
static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
enum port port)
{
return !(I915_READ(ICL_PHY_MISC(port)) &
ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
(I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT);
}
static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
enum port port)
{
bool ret;
if (!icl_combo_phy_enabled(dev_priv, port))
return false;
ret = cnl_verify_procmon_ref_values(dev_priv, port);
ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port),
CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
return ret;
}
void icl_combo_phys_init(struct drm_i915_private *dev_priv)
{
enum port port;
for_each_combo_port(dev_priv, port) {
u32 val;
if (icl_combo_phy_verify_state(dev_priv, port)) {
DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n",
port_name(port));
continue;
}
val = I915_READ(ICL_PHY_MISC(port));
val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
I915_WRITE(ICL_PHY_MISC(port), val);
cnl_set_procmon_ref_values(dev_priv, port);
val = I915_READ(ICL_PORT_COMP_DW0(port));
val |= COMP_INIT;
I915_WRITE(ICL_PORT_COMP_DW0(port), val);
val = I915_READ(ICL_PORT_CL_DW5(port));
val |= CL_POWER_DOWN_ENABLE;
I915_WRITE(ICL_PORT_CL_DW5(port), val);
}
}
void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
{
enum port port;
for_each_combo_port_reverse(dev_priv, port) {
u32 val;
if (!icl_combo_phy_verify_state(dev_priv, port))
DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
port_name(port));
val = I915_READ(ICL_PHY_MISC(port));
val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
I915_WRITE(ICL_PHY_MISC(port), val);
val = I915_READ(ICL_PORT_COMP_DW0(port));
val &= ~COMP_INIT;
I915_WRITE(ICL_PORT_COMP_DW0(port), val);
}
}

View file

@ -25,11 +25,140 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "i915_drv.h"
int intel_connector_init(struct intel_connector *connector)
{
struct intel_digital_connector_state *conn_state;
/*
* Allocate enough memory to hold intel_digital_connector_state.
* This might be a few bytes too many, but for connectors that don't
* need it we'll free the state and allocate a smaller one on the first
* successful commit anyway.
*/
conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
if (!conn_state)
return -ENOMEM;
__drm_atomic_helper_connector_reset(&connector->base,
&conn_state->base);
return 0;
}
struct intel_connector *intel_connector_alloc(void)
{
struct intel_connector *connector;
connector = kzalloc(sizeof(*connector), GFP_KERNEL);
if (!connector)
return NULL;
if (intel_connector_init(connector) < 0) {
kfree(connector);
return NULL;
}
return connector;
}
/*
* Free the bits allocated by intel_connector_alloc.
* This should only be used after intel_connector_alloc has returned
* successfully, and before drm_connector_init returns successfully.
* Otherwise the destroy callbacks for the connector and the state should
* take care of proper cleanup/free (see intel_connector_destroy).
*/
void intel_connector_free(struct intel_connector *connector)
{
kfree(to_intel_digital_connector_state(connector->base.state));
kfree(connector);
}
/*
* Connector type independent destroy hook for drm_connector_funcs.
*/
void intel_connector_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
kfree(intel_connector->detect_edid);
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
intel_panel_fini(&intel_connector->panel);
drm_connector_cleanup(connector);
kfree(connector);
}
int intel_connector_register(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
int ret;
ret = intel_backlight_device_register(intel_connector);
if (ret)
goto err;
if (i915_inject_load_failure()) {
ret = -EFAULT;
goto err_backlight;
}
return 0;
err_backlight:
intel_backlight_device_unregister(intel_connector);
err:
return ret;
}
void intel_connector_unregister(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
intel_backlight_device_unregister(intel_connector);
}
void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder)
{
connector->encoder = encoder;
drm_connector_attach_encoder(&connector->base, &encoder->base);
}
/*
* Simple connector->get_hw_state implementation for encoders that support only
* one connector and no cloning, so the encoder state determines the state
* of the connector.
*/
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
enum pipe pipe = 0;
struct intel_encoder *encoder = connector->encoder;
return encoder->get_hw_state(encoder, &pipe);
}
enum pipe intel_connector_get_pipe(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
if (!connector->base.state->crtc)
return INVALID_PIPE;
return to_intel_crtc(connector->base.state->crtc)->pipe;
}
/**
* intel_connector_update_modes - update connector from edid
* @connector: DRM connector device to use

View file

@ -354,6 +354,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
return true;
}
@ -368,6 +369,7 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
return false;
pipe_config->has_pch_encoder = true;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
return true;
}
@ -389,6 +391,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
return false;
pipe_config->has_pch_encoder = true;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev_priv)) {
@ -849,12 +852,6 @@ intel_crt_detect(struct drm_connector *connector,
return status;
}
static void intel_crt_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
kfree(connector);
}
static int intel_crt_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@ -909,7 +906,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_crt_destroy,
.destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};

View file

@ -34,34 +34,38 @@
* low-power state and comes back to normal.
*/
#define I915_CSR_ICL "i915/icl_dmc_ver1_07.bin"
MODULE_FIRMWARE(I915_CSR_ICL);
#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
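/*
 * No gen12 firmware is known yet; reuse the ICL size cap so a firmware
 * override via the dmc_firmware_path parameter can still be size-checked.
 */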
#define ICL_CSR_PATH "i915/icl_dmc_ver1_07.bin"
#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
MODULE_FIRMWARE(I915_CSR_GLK);
#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
#define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin"
MODULE_FIRMWARE(I915_CSR_CNL);
#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define I915_CSR_KBL "i915/kbl_dmc_ver1_04.bin"
MODULE_FIRMWARE(I915_CSR_KBL);
#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
#define I915_CSR_SKL "i915/skl_dmc_ver1_27.bin"
MODULE_FIRMWARE(I915_CSR_SKL);
#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 27)
#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
MODULE_FIRMWARE(I915_CSR_BXT);
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define BXT_CSR_MAX_FW_SIZE 0x3000
#define GLK_CSR_MAX_FW_SIZE 0x4000
#define ICL_CSR_MAX_FW_SIZE 0x6000
MODULE_FIRMWARE(ICL_CSR_PATH);
#define CNL_CSR_PATH "i915/cnl_dmc_ver1_07.bin"
#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define CNL_CSR_MAX_FW_SIZE GLK_CSR_MAX_FW_SIZE
MODULE_FIRMWARE(CNL_CSR_PATH);
#define GLK_CSR_PATH "i915/glk_dmc_ver1_04.bin"
#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
#define GLK_CSR_MAX_FW_SIZE 0x4000
MODULE_FIRMWARE(GLK_CSR_PATH);
#define KBL_CSR_PATH "i915/kbl_dmc_ver1_04.bin"
#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
#define KBL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE
MODULE_FIRMWARE(KBL_CSR_PATH);
#define SKL_CSR_PATH "i915/skl_dmc_ver1_27.bin"
#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 27)
#define SKL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE
MODULE_FIRMWARE(SKL_CSR_PATH);
#define BXT_CSR_PATH "i915/bxt_dmc_ver1_07.bin"
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define BXT_CSR_MAX_FW_SIZE 0x3000
MODULE_FIRMWARE(BXT_CSR_PATH);
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
struct intel_css_header {
@ -190,6 +194,12 @@ static const struct stepping_info bxt_stepping_info[] = {
{'B', '0'}, {'B', '1'}, {'B', '2'}
};
static const struct stepping_info icl_stepping_info[] = {
{'A', '0'}, {'A', '1'}, {'A', '2'},
{'B', '0'}, {'B', '2'},
{'C', '0'}
};
static const struct stepping_info no_stepping_info = { '*', '*' };
static const struct stepping_info *
@ -198,7 +208,10 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
const struct stepping_info *si;
unsigned int size;
if (IS_SKYLAKE(dev_priv)) {
if (IS_ICELAKE(dev_priv)) {
size = ARRAY_SIZE(icl_stepping_info);
si = icl_stepping_info;
} else if (IS_SKYLAKE(dev_priv)) {
size = ARRAY_SIZE(skl_stepping_info);
si = skl_stepping_info;
} else if (IS_BROXTON(dev_priv)) {
@ -285,10 +298,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
struct intel_csr *csr = &dev_priv->csr;
const struct stepping_info *si = intel_get_stepping_info(dev_priv);
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
uint32_t max_fw_size = 0;
uint32_t i;
uint32_t *dmc_payload;
uint32_t required_version;
if (!fw)
return NULL;
@ -303,38 +314,19 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
return NULL;
}
csr->version = css_header->version;
if (csr->fw_path == i915_modparams.dmc_firmware_path) {
/* Bypass version check for firmware override. */
required_version = csr->version;
} else if (IS_ICELAKE(dev_priv)) {
required_version = ICL_CSR_VERSION_REQUIRED;
} else if (IS_CANNONLAKE(dev_priv)) {
required_version = CNL_CSR_VERSION_REQUIRED;
} else if (IS_GEMINILAKE(dev_priv)) {
required_version = GLK_CSR_VERSION_REQUIRED;
} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
required_version = KBL_CSR_VERSION_REQUIRED;
} else if (IS_SKYLAKE(dev_priv)) {
required_version = SKL_CSR_VERSION_REQUIRED;
} else if (IS_BROXTON(dev_priv)) {
required_version = BXT_CSR_VERSION_REQUIRED;
} else {
MISSING_CASE(INTEL_REVID(dev_priv));
required_version = 0;
}
if (csr->version != required_version) {
if (csr->required_version &&
css_header->version != csr->required_version) {
DRM_INFO("Refusing to load DMC firmware v%u.%u,"
" please use v%u.%u\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version),
CSR_VERSION_MAJOR(required_version),
CSR_VERSION_MINOR(required_version));
CSR_VERSION_MAJOR(css_header->version),
CSR_VERSION_MINOR(css_header->version),
CSR_VERSION_MAJOR(csr->required_version),
CSR_VERSION_MINOR(csr->required_version));
return NULL;
}
csr->version = css_header->version;
readcount += sizeof(struct intel_css_header);
/* Extract Package Header information*/
@ -402,15 +394,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
nbytes = dmc_header->fw_size * 4;
if (INTEL_GEN(dev_priv) >= 11)
max_fw_size = ICL_CSR_MAX_FW_SIZE;
else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
max_fw_size = GLK_CSR_MAX_FW_SIZE;
else if (IS_GEN9(dev_priv))
max_fw_size = BXT_CSR_MAX_FW_SIZE;
else
MISSING_CASE(INTEL_REVID(dev_priv));
if (nbytes > max_fw_size) {
if (nbytes > csr->max_fw_size) {
DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
return NULL;
}
@ -475,27 +459,57 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
if (i915_modparams.dmc_firmware_path)
csr->fw_path = i915_modparams.dmc_firmware_path;
else if (IS_ICELAKE(dev_priv))
csr->fw_path = I915_CSR_ICL;
else if (IS_CANNONLAKE(dev_priv))
csr->fw_path = I915_CSR_CNL;
else if (IS_GEMINILAKE(dev_priv))
csr->fw_path = I915_CSR_GLK;
else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
csr->fw_path = I915_CSR_KBL;
else if (IS_SKYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;
/*
* Obtain a runtime pm reference, until CSR is loaded,
* to avoid entering runtime-suspend.
* Obtain a runtime pm reference, until CSR is loaded, to avoid entering
* runtime-suspend.
*
* On error, we return with the rpm wakeref held to prevent runtime
* suspend as runtime suspend *requires* a working CSR for whatever
* reason.
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
if (INTEL_GEN(dev_priv) >= 12) {
/* Allow to load fw via parameter using the last known size */
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
} else if (IS_ICELAKE(dev_priv)) {
csr->fw_path = ICL_CSR_PATH;
csr->required_version = ICL_CSR_VERSION_REQUIRED;
csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
} else if (IS_CANNONLAKE(dev_priv)) {
csr->fw_path = CNL_CSR_PATH;
csr->required_version = CNL_CSR_VERSION_REQUIRED;
csr->max_fw_size = CNL_CSR_MAX_FW_SIZE;
} else if (IS_GEMINILAKE(dev_priv)) {
csr->fw_path = GLK_CSR_PATH;
csr->required_version = GLK_CSR_VERSION_REQUIRED;
csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
csr->fw_path = KBL_CSR_PATH;
csr->required_version = KBL_CSR_VERSION_REQUIRED;
csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
} else if (IS_SKYLAKE(dev_priv)) {
csr->fw_path = SKL_CSR_PATH;
csr->required_version = SKL_CSR_VERSION_REQUIRED;
csr->max_fw_size = SKL_CSR_MAX_FW_SIZE;
} else if (IS_BROXTON(dev_priv)) {
csr->fw_path = BXT_CSR_PATH;
csr->required_version = BXT_CSR_VERSION_REQUIRED;
csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
}
if (i915_modparams.dmc_firmware_path) {
if (strlen(i915_modparams.dmc_firmware_path) == 0) {
csr->fw_path = NULL;
DRM_INFO("Disabling CSR firmware and runtime PM\n");
return;
}
csr->fw_path = i915_modparams.dmc_firmware_path;
/* Bypass version check for firmware override. */
csr->required_version = 0;
}
if (csr->fw_path == NULL) {
DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv)));

View file

@ -642,7 +642,7 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
if (IS_KBL_ULX(dev_priv)) {
if (IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
return kbl_y_ddi_translations_dp;
} else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) {
@ -658,7 +658,7 @@ static const struct ddi_buf_trans *
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
if (dev_priv->vbt.edp.low_vswing) {
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
return skl_y_ddi_translations_edp;
} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) ||
@ -680,7 +680,7 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
return skl_y_ddi_translations_hdmi;
} else {
@ -1060,10 +1060,10 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
}
static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
const struct intel_shared_dpll *pll)
const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
int clock = crtc->config->port_clock;
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
int clock = crtc_state->port_clock;
const enum intel_dpll_id id = pll->info->id;
switch (id) {
@ -1517,7 +1517,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
else
dotclock = pipe_config->port_clock;
if (pipe_config->ycbcr420)
if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
dotclock *= 2;
if (pipe_config->pixel_multiplier)
@ -1737,16 +1737,16 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (INTEL_GEN(dev_priv) <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
else if (IS_GEN9_BC(dev_priv))
skl_ddi_clock_get(encoder, pipe_config);
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_clock_get(encoder, pipe_config);
if (IS_ICELAKE(dev_priv))
icl_ddi_clock_get(encoder, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_clock_get(encoder, pipe_config);
else if (IS_ICELAKE(dev_priv))
icl_ddi_clock_get(encoder, pipe_config);
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_clock_get(encoder, pipe_config);
else if (IS_GEN9_BC(dev_priv))
skl_ddi_clock_get(encoder, pipe_config);
else if (INTEL_GEN(dev_priv) <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
}
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
@ -1784,6 +1784,13 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
break;
}
/*
* As per DP 1.2 spec section 2.3.4.3, while sending
* YCBCR 444 signals we should program MSA MISC1/0 fields with
* colorspace information. The output colorspace encoding is BT601.
*/
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR;
I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
@ -1998,24 +2005,24 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
return ret;
}
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
u8 *pipe_mask, bool *is_dp_mst)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = encoder->port;
enum pipe p;
u32 tmp;
bool ret;
u8 mst_pipe_mask;
*pipe_mask = 0;
*is_dp_mst = false;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
ret = false;
return;
tmp = I915_READ(DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
@ -2023,44 +2030,58 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
MISSING_CASE(tmp & TRANS_DDI_EDP_INPUT_MASK);
/* fallthrough */
case TRANS_DDI_EDP_INPUT_A_ON:
case TRANS_DDI_EDP_INPUT_A_ONOFF:
*pipe = PIPE_A;
*pipe_mask = BIT(PIPE_A);
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
*pipe = PIPE_B;
*pipe_mask = BIT(PIPE_B);
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
*pipe = PIPE_C;
*pipe_mask = BIT(PIPE_C);
break;
}
ret = true;
goto out;
}
mst_pipe_mask = 0;
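/*
 * Collect every pipe whose transcoder is driven by this port, tracking
 * MST pipes separately so conflicting configurations can be flagged below.
 */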
for_each_pipe(dev_priv, p) {
enum transcoder cpu_transcoder = (enum transcoder) p;
enum transcoder cpu_transcoder = (enum transcoder)p;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
TRANS_DDI_MODE_SELECT_DP_MST)
goto out;
if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port))
continue;
*pipe = p;
ret = true;
if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
TRANS_DDI_MODE_SELECT_DP_MST)
mst_pipe_mask |= BIT(p);
goto out;
}
*pipe_mask |= BIT(p);
}
DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
if (!*pipe_mask)
DRM_DEBUG_KMS("No pipe for ddi port %c found\n",
port_name(port));
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n",
port_name(port), *pipe_mask);
*pipe_mask = BIT(ffs(*pipe_mask) - 1);
}
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n",
port_name(port), *pipe_mask, mst_pipe_mask);
else
*is_dp_mst = mst_pipe_mask;
out:
if (ret && IS_GEN9_LP(dev_priv)) {
if (*pipe_mask && IS_GEN9_LP(dev_priv)) {
tmp = I915_READ(BXT_PHY_CTL(port));
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
@ -2070,12 +2091,26 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
}
intel_display_power_put(dev_priv, encoder->power_domain);
}
return ret;
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
u8 pipe_mask;
bool is_mst;
intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
if (is_mst || !pipe_mask)
return false;
*pipe = ffs(pipe_mask) - 1;
return true;
}
static inline enum intel_display_power_domain
intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
{
/* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
* DC states enabled at the same time, while for driver initiated AUX
@ -2089,13 +2124,14 @@ intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
* Note that PSR is enabled only on Port A even though this function
* returns the correct domain for other ports too.
*/
return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
intel_dp->aux_power_domain;
return dig_port->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
intel_aux_power_domain(dig_port);
}
static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port;
u64 domains;
@ -2110,12 +2146,13 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
dig_port = enc_to_dig_port(&encoder->base);
domains = BIT_ULL(dig_port->ddi_io_power_domain);
/* AUX power is only needed for (e)DP mode, not for HDMI. */
if (intel_crtc_has_dp_encoder(crtc_state)) {
struct intel_dp *intel_dp = &dig_port->dp;
domains |= BIT_ULL(intel_ddi_main_link_aux_domain(intel_dp));
}
/*
* AUX power is only needed for (e)DP mode, and for HDMI mode on TC
* ports.
*/
if (intel_crtc_has_dp_encoder(crtc_state) ||
intel_port_is_tc(dev_priv, encoder->port))
domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
return domains;
}
@ -2813,12 +2850,59 @@ void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
}
}
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val;
enum port port = encoder->port;
bool clk_enabled;
/*
* In case of DP MST, we sanitize the primary encoder only, not the
* virtual ones.
*/
if (encoder->type == INTEL_OUTPUT_DP_MST)
return;
val = I915_READ(DPCLKA_CFGCR0_ICL);
clk_enabled = !(val & icl_dpclka_cfgcr0_clk_off(dev_priv, port));
if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) {
u8 pipe_mask;
bool is_mst;
intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
/*
* In the unlikely case that BIOS enables DP in MST mode, just
* warn since our MST HW readout is incomplete.
*/
if (WARN_ON(is_mst))
return;
}
if (clk_enabled == !!encoder->base.crtc)
return;
/*
* Punt for now on the case where the clock is disabled but the encoder is
* enabled; something else is really broken then.
*/
if (WARN_ON(!clk_enabled))
return;
DRM_NOTE("Port %c is disabled but it has a mapped PLL, unmap it\n",
port_name(port));
val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
I915_WRITE(DPCLKA_CFGCR0_ICL, val);
}
static void intel_ddi_clk_select(struct intel_encoder *encoder,
const struct intel_shared_dpll *pll)
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
uint32_t val;
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
if (WARN_ON(!pll))
return;
@ -2828,7 +2912,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
if (IS_ICELAKE(dev_priv)) {
if (!intel_port_is_combophy(dev_priv, port))
I915_WRITE(DDI_CLK_SEL(port),
icl_pll_to_ddi_pll_sel(encoder, pll));
icl_pll_to_ddi_pll_sel(encoder, crtc_state));
} else if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0);
@ -2881,6 +2965,137 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
}
}
static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
{
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
u32 val;
int i;
if (tc_port == PORT_TC_NONE)
return;
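/* Only Type-C ports have MG PHY lanes; combo PHY ports bail out above. */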
for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
val = I915_READ(mg_regs[i]);
val |= MG_DP_MODE_CFG_TR2PWR_GATING |
MG_DP_MODE_CFG_TRPWR_GATING |
MG_DP_MODE_CFG_CLNPWR_GATING |
MG_DP_MODE_CFG_DIGPWR_GATING |
MG_DP_MODE_CFG_GAONPWR_GATING;
I915_WRITE(mg_regs[i], val);
}
val = I915_READ(MG_MISC_SUS0(tc_port));
val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
MG_MISC_SUS0_CFG_TR2PWR_GATING |
MG_MISC_SUS0_CFG_CL2PWR_GATING |
MG_MISC_SUS0_CFG_GAONPWR_GATING |
MG_MISC_SUS0_CFG_TRPWR_GATING |
MG_MISC_SUS0_CFG_CL1PWR_GATING |
MG_MISC_SUS0_CFG_DGPWR_GATING;
I915_WRITE(MG_MISC_SUS0(tc_port), val);
}
static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
{
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
u32 val;
int i;
if (tc_port == PORT_TC_NONE)
return;
for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
val = I915_READ(mg_regs[i]);
val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
MG_DP_MODE_CFG_TRPWR_GATING |
MG_DP_MODE_CFG_CLNPWR_GATING |
MG_DP_MODE_CFG_DIGPWR_GATING |
MG_DP_MODE_CFG_GAONPWR_GATING);
I915_WRITE(mg_regs[i], val);
}
val = I915_READ(MG_MISC_SUS0(tc_port));
val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
MG_MISC_SUS0_CFG_TR2PWR_GATING |
MG_MISC_SUS0_CFG_CL2PWR_GATING |
MG_MISC_SUS0_CFG_GAONPWR_GATING |
MG_MISC_SUS0_CFG_TRPWR_GATING |
MG_MISC_SUS0_CFG_CL1PWR_GATING |
MG_MISC_SUS0_CFG_DGPWR_GATING);
I915_WRITE(MG_MISC_SUS0(tc_port), val);
}
static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
{
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
u32 ln0, ln1, lane_info;
if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
return;
ln0 = I915_READ(MG_DP_MODE(port, 0));
ln1 = I915_READ(MG_DP_MODE(port, 1));
switch (intel_dig_port->tc_type) {
case TC_PORT_TYPEC:
ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
DP_LANE_ASSIGNMENT_SHIFT(tc_port);
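/*
 * lane_info is the 4-bit lane assignment read back from the FIA; program
 * x1/x2 mode on each MG PHY lane pair accordingly.
 */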
switch (lane_info) {
case 0x1:
case 0x4:
break;
case 0x2:
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
break;
case 0x3:
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
MG_DP_MODE_CFG_DP_X2_MODE;
break;
case 0x8:
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
break;
case 0xC:
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
MG_DP_MODE_CFG_DP_X2_MODE;
break;
case 0xF:
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
MG_DP_MODE_CFG_DP_X2_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
MG_DP_MODE_CFG_DP_X2_MODE;
break;
default:
MISSING_CASE(lane_info);
}
break;
case TC_PORT_LEGACY:
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
break;
default:
MISSING_CASE(intel_dig_port->tc_type);
return;
}
I915_WRITE(MG_DP_MODE(port, 0), ln0);
I915_WRITE(MG_DP_MODE(port, 1), ln1);
}
static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@ -2894,19 +3109,16 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
intel_display_power_get(dev_priv,
intel_ddi_main_link_aux_domain(intel_dp));
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
intel_edp_panel_on(intel_dp);
intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
intel_ddi_clk_select(encoder, crtc_state);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(intel_dp);
icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
if (IS_ICELAKE(dev_priv))
@ -2944,10 +3156,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
intel_ddi_clk_select(encoder, crtc_state);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
if (IS_ICELAKE(dev_priv))
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, INTEL_OUTPUT_HDMI);
@ -2958,12 +3173,14 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
else
intel_prepare_hdmi_ddi_buffers(encoder, level);
icl_enable_phy_clock_gating(dig_port);
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
intel_ddi_enable_pipe_clock(crtc_state);
intel_dig_port->set_infoframes(&encoder->base,
intel_dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
crtc_state, conn_state);
}
@ -2993,10 +3210,22 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
else
} else {
struct intel_lspcon *lspcon =
enc_to_intel_lspcon(&encoder->base);
intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
if (lspcon->active) {
struct intel_digital_port *dig_port =
enc_to_dig_port(&encoder->base);
dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
crtc_state, conn_state);
}
}
}
static void intel_disable_ddi_buf(struct intel_encoder *encoder)
@ -3049,9 +3278,6 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
intel_display_power_put(dev_priv,
intel_ddi_main_link_aux_domain(intel_dp));
}
static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@ -3062,7 +3288,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
dig_port->set_infoframes(&encoder->base, false,
dig_port->set_infoframes(encoder, false,
old_crtc_state, old_conn_state);
intel_ddi_disable_pipe_clock(old_crtc_state);
@ -3154,6 +3380,26 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
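/*
 * The CHICKEN_TRANS override bits below act on a specific DDI port rather
 * than on the transcoder driving it, hence this fixed port to register map.
 */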
static i915_reg_t
gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
enum port port)
{
static const i915_reg_t regs[] = {
[PORT_A] = CHICKEN_TRANS_EDP,
[PORT_B] = CHICKEN_TRANS_A,
[PORT_C] = CHICKEN_TRANS_B,
[PORT_D] = CHICKEN_TRANS_C,
[PORT_E] = CHICKEN_TRANS_A,
};
WARN_ON(INTEL_GEN(dev_priv) < 9);
if (WARN_ON(port < PORT_A || port > PORT_E))
port = PORT_A;
return regs[port];
}
static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@ -3177,17 +3423,10 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
* the bits affect a specific DDI port rather than
* a specific transcoder.
*/
static const enum transcoder port_to_transcoder[] = {
[PORT_A] = TRANSCODER_EDP,
[PORT_B] = TRANSCODER_A,
[PORT_C] = TRANSCODER_B,
[PORT_D] = TRANSCODER_C,
[PORT_E] = TRANSCODER_A,
};
enum transcoder transcoder = port_to_transcoder[port];
i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port);
u32 val;
val = I915_READ(CHICKEN_TRANS(transcoder));
val = I915_READ(reg);
if (port == PORT_E)
val |= DDIE_TRAINING_OVERRIDE_ENABLE |
@ -3196,8 +3435,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
val |= DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE;
I915_WRITE(CHICKEN_TRANS(transcoder), val);
POSTING_READ(CHICKEN_TRANS(transcoder));
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(1);
@ -3208,7 +3447,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE);
I915_WRITE(CHICKEN_TRANS(transcoder), val);
I915_WRITE(reg, val);
}
/* In HDMI/DVI mode, the port width, and swing/emphasis values
@ -3282,13 +3521,76 @@ static void intel_disable_ddi(struct intel_encoder *encoder,
intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
}
static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
enum port port)
{
uint8_t mask = pipe_config->lane_lat_optim_mask;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
u32 val = I915_READ(PORT_TX_DFLEXDPMLE1);
bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
switch (pipe_config->lane_count) {
case 1:
val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
DFLEXDPMLE1_DPMLETC_ML0(tc_port);
break;
case 2:
val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
break;
case 4:
val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
break;
default:
MISSING_CASE(pipe_config->lane_count);
}
I915_WRITE(PORT_TX_DFLEXDPMLE1, val);
}
static void
intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
enum port port = encoder->port;
if (intel_crtc_has_dp_encoder(crtc_state) ||
intel_port_is_tc(dev_priv, encoder->port))
intel_display_power_get(dev_priv,
intel_ddi_main_link_aux_domain(dig_port));
if (IS_GEN9_LP(dev_priv))
bxt_ddi_phy_set_lane_optim_mask(encoder,
crtc_state->lane_lat_optim_mask);
/*
* Program the lane count for static/dynamic connections on Type-C ports.
* Skip this step for TBT.
*/
if (dig_port->tc_type == TC_PORT_UNKNOWN ||
dig_port->tc_type == TC_PORT_TBT)
return;
intel_ddi_set_fia_lane_count(encoder, crtc_state, port);
}
static void
intel_ddi_post_pll_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
if (intel_crtc_has_dp_encoder(crtc_state) ||
intel_port_is_tc(dev_priv, encoder->port))
intel_display_power_put(dev_priv,
intel_ddi_main_link_aux_domain(dig_port));
}
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@ -3353,10 +3655,10 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state)
{
if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 2;
else if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 1;
else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 2;
}
void intel_ddi_get_config(struct intel_encoder *encoder,
@ -3406,7 +3708,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = true;
intel_dig_port = enc_to_dig_port(&encoder->base);
if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
pipe_config->has_infoframe = true;
if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
@ -3767,6 +4069,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
enum pipe pipe;
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
@ -3805,8 +4108,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->compute_output_type = intel_ddi_compute_output_type;
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->enable = intel_enable_ddi;
if (IS_GEN9_LP(dev_priv))
intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable;
intel_encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
intel_encoder->post_pll_disable = intel_ddi_post_pll_disable;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
intel_encoder->post_disable = intel_ddi_post_disable;
@ -3817,8 +4120,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->type = INTEL_OUTPUT_DDI;
intel_encoder->power_domain = intel_port_to_power_domain(port);
intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
for_each_pipe(dev_priv, pipe)
intel_encoder->crtc_mask |= BIT(pipe);
if (INTEL_GEN(dev_priv) >= 11)
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
@ -3828,6 +4132,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
switch (port) {
case PORT_A:
@ -3858,8 +4163,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
MISSING_CASE(port);
}
intel_infoframe_init(intel_dig_port);
if (init_dp) {
if (!intel_ddi_init_dp_connector(intel_dig_port))
goto err;
@ -3888,6 +4191,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
port_name(port));
}
intel_infoframe_init(intel_dig_port);
return;
err:

View file

@ -744,27 +744,30 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
if (INTEL_GEN(dev_priv) >= 10) {
for_each_pipe(dev_priv, pipe)
info->num_scalers[pipe] = 2;
} else if (INTEL_GEN(dev_priv) == 9) {
} else if (IS_GEN9(dev_priv)) {
info->num_scalers[PIPE_A] = 2;
info->num_scalers[PIPE_B] = 2;
info->num_scalers[PIPE_C] = 1;
}
BUILD_BUG_ON(I915_NUM_ENGINES >
sizeof(intel_ring_mask_t) * BITS_PER_BYTE);
BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
/*
* Skylake and Broxton currently don't expose the topmost plane as its
* use is exclusive with the legacy cursor and we only want to expose
* one of those, not both. Until we can safely expose the topmost plane
* as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
* we don't expose the topmost plane at all to prevent ABI breakage
* down the line.
*/
if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
if (IS_GEN11(dev_priv))
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 6;
else if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 3;
else if (IS_BROXTON(dev_priv)) {
/*
* Skylake and Broxton currently don't expose the topmost plane as its
* use is exclusive with the legacy cursor and we only want to expose
* one of those, not both. Until we can safely expose the topmost plane
* as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
* we don't expose the topmost plane at all to prevent ABI breakage
* down the line.
*/
info->num_sprites[PIPE_A] = 2;
info->num_sprites[PIPE_B] = 2;
info->num_sprites[PIPE_C] = 1;
@ -844,13 +847,18 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
cherryview_sseu_info_init(dev_priv);
else if (IS_BROADWELL(dev_priv))
broadwell_sseu_info_init(dev_priv);
else if (INTEL_GEN(dev_priv) == 9)
else if (IS_GEN9(dev_priv))
gen9_sseu_info_init(dev_priv);
else if (INTEL_GEN(dev_priv) == 10)
else if (IS_GEN10(dev_priv))
gen10_sseu_info_init(dev_priv);
else if (INTEL_GEN(dev_priv) >= 11)
gen11_sseu_info_init(dev_priv);
if (IS_GEN6(dev_priv) && intel_vtd_active()) {
DRM_INFO("Disabling ppGTT for VT-d support\n");
info->ppgtt = INTEL_PPGTT_NONE;
}
/* Initialize command stream timestamp frequency */
info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}
@ -872,40 +880,37 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
struct intel_device_info *info = mkwrite_device_info(dev_priv);
u8 vdbox_disable, vebox_disable;
u32 media_fuse;
int i;
unsigned int i;
if (INTEL_GEN(dev_priv) < 11)
return;
media_fuse = I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
vdbox_disable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
vebox_disable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
GEN11_GT_VEBOX_DISABLE_SHIFT;
info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
GEN11_GT_VEBOX_DISABLE_SHIFT;
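/*
* Illustration with an assumed fuse value: if the raw register reads 0x2
* (VCS1 fused off), the inversion above leaves bit 1 clear in vdbox_enable
* while the remaining VCS bits stay set.
*/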
DRM_DEBUG_DRIVER("vdbox disable: %04x\n", vdbox_disable);
DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable);
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(dev_priv, _VCS(i)))
continue;
if (!(BIT(i) & vdbox_disable))
continue;
info->ring_mask &= ~ENGINE_MASK(_VCS(i));
DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
if (!(BIT(i) & info->vdbox_enable)) {
info->ring_mask &= ~ENGINE_MASK(_VCS(i));
DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
}
}
DRM_DEBUG_DRIVER("vebox disable: %04x\n", vebox_disable);
DRM_DEBUG_DRIVER("vebox enable: %04x\n", info->vebox_enable);
for (i = 0; i < I915_MAX_VECS; i++) {
if (!HAS_ENGINE(dev_priv, _VECS(i)))
continue;
if (!(BIT(i) & vebox_disable))
continue;
info->ring_mask &= ~ENGINE_MASK(_VECS(i));
DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
if (!(BIT(i) & info->vebox_enable)) {
info->ring_mask &= ~ENGINE_MASK(_VECS(i));
DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
}
}
}


@ -25,6 +25,8 @@
#ifndef _INTEL_DEVICE_INFO_H_
#define _INTEL_DEVICE_INFO_H_
#include <uapi/drm/i915_drm.h>
#include "intel_display.h"
struct drm_printer;
@ -74,21 +76,25 @@ enum intel_platform {
INTEL_MAX_PLATFORMS
};
enum intel_ppgtt {
INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
INTEL_PPGTT_FULL = I915_GEM_PPGTT_FULL,
INTEL_PPGTT_FULL_4LVL,
};
#define DEV_INFO_FOR_EACH_FLAG(func) \
func(is_mobile); \
func(is_lp); \
func(is_alpha_support); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
func(has_aliasing_ppgtt); \
func(has_csr); \
func(has_ddi); \
func(has_dp_mst); \
func(has_reset_engine); \
func(has_fbc); \
func(has_fpga_dbg); \
func(has_full_ppgtt); \
func(has_full_48bit_ppgtt); \
func(has_gmch_display); \
func(has_guc); \
func(has_guc_ct); \
@ -118,7 +124,7 @@ enum intel_platform {
struct sseu_dev_info {
u8 slice_mask;
u8 subslice_mask[GEN_MAX_SUBSLICES];
u8 subslice_mask[GEN_MAX_SLICES];
u16 eu_total;
u8 eu_per_subslice;
u8 min_eu_in_pool;
@ -154,6 +160,7 @@ struct intel_device_info {
enum intel_platform platform;
u32 platform_mask;
enum intel_ppgtt ppgtt;
unsigned int page_sizes; /* page sizes supported by the HW */
u32 display_mmio_offset;
@ -170,7 +177,6 @@ struct intel_device_info {
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
int trans_offsets[I915_MAX_TRANSCODERS];
int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
/* Slice/subslice/EU info */
@ -178,6 +184,10 @@ struct intel_device_info {
u32 cs_timestamp_frequency_khz;
/* Enabled (not fused off) media engine bitmasks. */
u8 vdbox_enable;
u8 vebox_enable;
struct color_luts {
u16 degamma_lut_size;
u16 gamma_lut_size;

File diff suppressed because it is too large


@ -43,6 +43,11 @@ enum i915_gpio {
GPIOM,
};
/*
* Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the
* rest have consecutive values and match the enum values of transcoders
* with a 1:1 transcoder -> pipe mapping.
*/
enum pipe {
INVALID_PIPE = -1,
@ -57,12 +62,25 @@ enum pipe {
#define pipe_name(p) ((p) + 'A')
enum transcoder {
TRANSCODER_A = 0,
TRANSCODER_B,
TRANSCODER_C,
/*
* The following transcoders have a 1:1 transcoder -> pipe mapping,
* keep their values fixed: the code assumes that TRANSCODER_A=0, the
* rest have consecutive values and match the enum values of the pipes
* they map to.
*/
TRANSCODER_A = PIPE_A,
TRANSCODER_B = PIPE_B,
TRANSCODER_C = PIPE_C,
/*
* The following transcoders can map to any pipe, their enum value
* doesn't need to stay fixed.
*/
TRANSCODER_EDP,
TRANSCODER_DSI_A,
TRANSCODER_DSI_C,
TRANSCODER_DSI_0,
TRANSCODER_DSI_1,
TRANSCODER_DSI_A = TRANSCODER_DSI_0, /* legacy DSI */
TRANSCODER_DSI_C = TRANSCODER_DSI_1, /* legacy DSI */
I915_MAX_TRANSCODERS
};
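/*
* Hypothetical illustration of the invariant above (the helper name is
* assumed, not part of the driver): because the pipe-mapped transcoders
* share their numeric values with the pipes, a plain cast is enough.
*/
static inline enum transcoder example_pipe_to_transcoder(enum pipe pipe)
{
        return (enum transcoder)pipe; /* only valid for PIPE_A..PIPE_C */
}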
@ -120,6 +138,9 @@ enum plane_id {
PLANE_SPRITE0,
PLANE_SPRITE1,
PLANE_SPRITE2,
PLANE_SPRITE3,
PLANE_SPRITE4,
PLANE_SPRITE5,
PLANE_CURSOR,
I915_MAX_PLANES,
@ -363,7 +384,7 @@ struct intel_link_m_n {
(__dev_priv)->power_domains.power_well_count; \
(__power_well)++)
#define for_each_power_well_rev(__dev_priv, __power_well) \
#define for_each_power_well_reverse(__dev_priv, __power_well) \
for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
(__dev_priv)->power_domains.power_well_count - 1; \
(__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
@ -373,8 +394,8 @@ struct intel_link_m_n {
for_each_power_well(__dev_priv, __power_well) \
for_each_if((__power_well)->desc->domains & (__domain_mask))
#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
for_each_power_well_rev(__dev_priv, __power_well) \
#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
for_each_power_well_reverse(__dev_priv, __power_well) \
for_each_if((__power_well)->desc->domains & (__domain_mask))
#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \

File diff suppressed because it is too large


@ -51,6 +51,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
bpp = 24;
if (intel_dp->compliance.test_data.bpc) {
@ -208,12 +209,25 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
if (intel_dp->active_mst_links == 0 &&
intel_dig_port->base.pre_pll_enable)
if (intel_dp->active_mst_links == 0)
intel_dig_port->base.pre_pll_enable(&intel_dig_port->base,
pipe_config, NULL);
}
static void intel_mst_post_pll_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
if (intel_dp->active_mst_links == 0)
intel_dig_port->base.post_pll_disable(&intel_dig_port->base,
old_crtc_state,
old_conn_state);
}
static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
@ -335,24 +349,12 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
intel_connector->port);
}
static void
intel_dp_mst_connector_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
drm_connector_cleanup(connector);
kfree(connector);
}
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_dp_mst_connector_destroy,
.destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
@ -560,6 +562,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
intel_encoder->enable = intel_mst_enable_dp;
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;


@ -748,7 +748,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
if (crtc->config->lane_count > 2) {
if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
@ -765,7 +765,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
val |= DPIO_PCS_CLK_SOFT_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
if (crtc->config->lane_count > 2) {
if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)


@ -126,16 +126,16 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
/**
* intel_prepare_shared_dpll - call a dpll's prepare hook
* @crtc: CRTC which has a shared dpll
* @crtc_state: CRTC, and its state, which has a shared dpll
*
* This calls the PLL's prepare hook if it has one and if the PLL is not
* already enabled. The prepare hook is platform specific.
*/
void intel_prepare_shared_dpll(struct intel_crtc *crtc)
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
if (WARN_ON(pll == NULL))
return;
@ -154,15 +154,15 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
/**
* intel_enable_shared_dpll - enable a CRTC's shared DPLL
* @crtc: CRTC which has a shared DPLL
* @crtc_state: CRTC, and its state, which has a shared DPLL
*
* Enable the shared DPLL used by @crtc.
*/
void intel_enable_shared_dpll(struct intel_crtc *crtc)
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
unsigned int old_mask;
@ -199,14 +199,15 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
/**
* intel_disable_shared_dpll - disable a CRTC's shared DPLL
* @crtc: CRTC which has a shared DPLL
* @crtc_state: CRTC, and its state, which has a shared DPLL
*
* Disable the shared DPLL used by @crtc.
*/
void intel_disable_shared_dpll(struct intel_crtc *crtc)
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
/* PCH only available on ILK+ */
@ -409,14 +410,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc;
/* Make sure no transcoder is still depending on us. */
for_each_intel_crtc(dev, crtc) {
if (crtc->config->shared_dpll == pll)
assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
}
I915_WRITE(PCH_DPLL(id), 0);
POSTING_READ(PCH_DPLL(id));
@ -2628,11 +2621,16 @@ static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
}
static enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
{
return port - PORT_C + DPLL_ID_ICL_MGPLL1;
}
bool intel_dpll_is_combophy(enum intel_dpll_id id)
{
return id == DPLL_ID_ICL_DPLL0 || id == DPLL_ID_ICL_DPLL1;
}
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
uint32_t *target_dco_khz,
struct intel_dpll_hw_state *state)
@ -2874,8 +2872,8 @@ static struct intel_shared_dpll *
icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *intel_dig_port;
struct intel_shared_dpll *pll;
struct intel_dpll_hw_state pll_state = {};
enum port port = encoder->port;
@ -2883,18 +2881,21 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
int clock = crtc_state->port_clock;
bool ret;
switch (port) {
case PORT_A:
case PORT_B:
if (intel_port_is_combophy(dev_priv, port)) {
min = DPLL_ID_ICL_DPLL0;
max = DPLL_ID_ICL_DPLL1;
ret = icl_calc_dpll_state(crtc_state, encoder, clock,
&pll_state);
break;
case PORT_C:
case PORT_D:
case PORT_E:
case PORT_F:
} else if (intel_port_is_tc(dev_priv, port)) {
if (encoder->type == INTEL_OUTPUT_DP_MST) {
struct intel_dp_mst_encoder *mst_encoder;
mst_encoder = enc_to_mst(&encoder->base);
intel_dig_port = mst_encoder->primary;
} else {
intel_dig_port = enc_to_dig_port(&encoder->base);
}
if (intel_dig_port->tc_type == TC_PORT_TBT) {
min = DPLL_ID_ICL_TBTPLL;
max = min;
@ -2906,8 +2907,7 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
&pll_state);
}
break;
default:
} else {
MISSING_CASE(port);
return NULL;
}
@ -2932,21 +2932,16 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
{
switch (id) {
default:
MISSING_CASE(id);
/* fall through */
case DPLL_ID_ICL_DPLL0:
case DPLL_ID_ICL_DPLL1:
if (intel_dpll_is_combophy(id))
return CNL_DPLL_ENABLE(id);
case DPLL_ID_ICL_TBTPLL:
else if (id == DPLL_ID_ICL_TBTPLL)
return TBT_PLL_ENABLE;
case DPLL_ID_ICL_MGPLL1:
case DPLL_ID_ICL_MGPLL2:
case DPLL_ID_ICL_MGPLL3:
case DPLL_ID_ICL_MGPLL4:
else
/*
* TODO: Make MG_PLL macros use
* tc port id instead of port id
*/
return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id));
}
}
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
@ -2965,17 +2960,11 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!(val & PLL_ENABLE))
goto out;
switch (id) {
case DPLL_ID_ICL_DPLL0:
case DPLL_ID_ICL_DPLL1:
case DPLL_ID_ICL_TBTPLL:
if (intel_dpll_is_combophy(id) ||
id == DPLL_ID_ICL_TBTPLL) {
hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
break;
case DPLL_ID_ICL_MGPLL1:
case DPLL_ID_ICL_MGPLL2:
case DPLL_ID_ICL_MGPLL3:
case DPLL_ID_ICL_MGPLL4:
} else {
port = icl_mg_pll_id_to_port(id);
hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port));
hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
@ -3013,9 +3002,6 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
break;
default:
MISSING_CASE(id);
}
ret = true;
@ -3104,21 +3090,10 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
PLL_POWER_STATE, 1))
DRM_ERROR("PLL %d Power not enabled\n", id);
switch (id) {
case DPLL_ID_ICL_DPLL0:
case DPLL_ID_ICL_DPLL1:
case DPLL_ID_ICL_TBTPLL:
if (intel_dpll_is_combophy(id) || id == DPLL_ID_ICL_TBTPLL)
icl_dpll_write(dev_priv, pll);
break;
case DPLL_ID_ICL_MGPLL1:
case DPLL_ID_ICL_MGPLL2:
case DPLL_ID_ICL_MGPLL3:
case DPLL_ID_ICL_MGPLL4:
else
icl_mg_pll_write(dev_priv, pll);
break;
default:
MISSING_CASE(id);
}
/*
* DVFS pre sequence would be here, but in our driver the cdclk code


@ -334,9 +334,9 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
struct intel_crtc *crtc,
struct drm_atomic_state *state);
void intel_prepare_shared_dpll(struct intel_crtc *crtc);
void intel_enable_shared_dpll(struct intel_crtc *crtc);
void intel_disable_shared_dpll(struct intel_crtc *crtc);
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
@ -345,5 +345,7 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
uint32_t pll_id);
int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
enum intel_dpll_id icl_port_to_mg_pll_id(enum port port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
#endif /* _INTEL_DPLL_MGR_H_ */


@ -381,6 +381,15 @@ struct intel_hdcp_shim {
bool *hdcp_capable);
};
struct intel_hdcp {
const struct intel_hdcp_shim *shim;
/* Mutex for hdcp state of the connector */
struct mutex mutex;
u64 value;
struct delayed_work check_work;
struct work_struct prop_work;
};
struct intel_connector {
struct drm_connector base;
/*
@ -413,11 +422,7 @@ struct intel_connector {
/* Work struct to schedule a uevent on link train failure */
struct work_struct modeset_retry_work;
const struct intel_hdcp_shim *hdcp_shim;
struct mutex hdcp_mutex;
uint64_t hdcp_value; /* protected by hdcp_mutex */
struct delayed_work hdcp_check_work;
struct work_struct hdcp_prop_work;
struct intel_hdcp hdcp;
};
struct intel_digital_connector_state {
@ -539,6 +544,26 @@ struct intel_plane_state {
*/
int scaler_id;
/*
* linked_plane:
*
* ICL planar formats require 2 planes that are updated as pairs.
* This member is used to make sure the other plane is also updated
* when required, and for update_slave() to find the correct
* plane_state to pass as argument.
*/
struct intel_plane *linked_plane;
/*
* slave:
* If set, don't update this plane directly; the linked plane's state is
* used to update it during atomic commit, via the update_slave() callback.
*
* It's also used by the watermark code to ignore wm calculations on
* this plane. They're calculated by the linked plane's wm code.
*/
u32 slave;
struct drm_intel_sprite_colorkey ckey;
};
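/*
* Sketch of how the linked_plane/slave members above are meant to be
* consumed (illustrative pseudo-code only; the real commit path lives in
* intel_atomic_plane.c):
*
*	if (plane_state->slave)
*		plane->update_slave(plane, crtc_state, master_plane_state);
*	else
*		plane->update_plane(plane, crtc_state, plane_state);
*
* i.e. a slave plane is never programmed from its own state; the state of
* its linked (master) plane is used instead.
*/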
@ -547,6 +572,7 @@ struct intel_initial_plane_config {
unsigned int tiling;
int size;
u32 base;
u8 rotation;
};
#define SKL_MIN_SRC_W 8
@ -712,6 +738,13 @@ struct intel_crtc_wm_state {
bool need_postvbl_update;
};
enum intel_output_format {
INTEL_OUTPUT_FORMAT_INVALID,
INTEL_OUTPUT_FORMAT_RGB,
INTEL_OUTPUT_FORMAT_YCBCR420,
INTEL_OUTPUT_FORMAT_YCBCR444,
};
struct intel_crtc_state {
struct drm_crtc_state base;
@ -899,8 +932,11 @@ struct intel_crtc_state {
/* HDMI High TMDS char rate ratio */
bool hdmi_high_tmds_clock_ratio;
/* output format is YCBCR 4:2:0 */
bool ycbcr420;
/* Output format RGB/YCBCR etc */
enum intel_output_format output_format;
/* Output down scaling is done in LSPCON device */
bool lspcon_downsampling;
};
struct intel_crtc {
@ -973,6 +1009,9 @@ struct intel_plane {
void (*update_plane)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void (*update_slave)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane,
struct intel_crtc *crtc);
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
@ -1070,13 +1109,13 @@ struct intel_dp {
bool link_mst;
bool link_trained;
bool has_audio;
bool detect_done;
bool reset_link_params;
enum aux_ch aux_ch;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
u8 fec_capable;
/* source rates */
int num_source_rates;
const int *source_rates;
@ -1094,7 +1133,6 @@ struct intel_dp {
/* sink or branch descriptor */
struct drm_dp_desc desc;
struct drm_dp_aux aux;
enum intel_display_power_domain aux_power_domain;
uint8_t train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
@ -1156,9 +1194,15 @@ struct intel_dp {
struct intel_dp_compliance compliance;
};
enum lspcon_vendor {
LSPCON_VENDOR_MCA,
LSPCON_VENDOR_PARADE
};
struct intel_lspcon {
bool active;
enum drm_lspcon_mode mode;
enum lspcon_vendor vendor;
};
struct intel_digital_port {
@ -1170,18 +1214,20 @@ struct intel_digital_port {
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
uint8_t max_lanes;
/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
enum aux_ch aux_ch;
enum intel_display_power_domain ddi_io_power_domain;
enum tc_port_type tc_type;
void (*write_infoframe)(struct drm_encoder *encoder,
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
void (*set_infoframes)(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool (*infoframe_enabled)(struct drm_encoder *encoder,
bool (*infoframe_enabled)(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
};
@ -1281,6 +1327,12 @@ enc_to_dig_port(struct drm_encoder *encoder)
return NULL;
}
static inline struct intel_digital_port *
conn_to_dig_port(struct intel_connector *connector)
{
return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
}
static inline struct intel_dp_mst_encoder *
enc_to_mst(struct drm_encoder *encoder)
{
@ -1306,6 +1358,12 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
}
}
static inline struct intel_lspcon *
enc_to_intel_lspcon(struct drm_encoder *encoder)
{
return &enc_to_dig_port(encoder)->lspcon;
}
static inline struct intel_digital_port *
dp_to_dig_port(struct intel_dp *intel_dp)
{
@ -1330,6 +1388,27 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
static inline struct intel_plane_state *
intel_atomic_get_plane_state(struct intel_atomic_state *state,
struct intel_plane *plane)
{
struct drm_plane_state *ret =
drm_atomic_get_plane_state(&state->base, &plane->base);
if (IS_ERR(ret))
return ERR_CAST(ret);
return to_intel_plane_state(ret);
}
static inline struct intel_plane_state *
intel_atomic_get_old_plane_state(struct intel_atomic_state *state,
struct intel_plane *plane)
{
return to_intel_plane_state(drm_atomic_get_old_plane_state(&state->base,
&plane->base));
}
static inline struct intel_plane_state *
intel_atomic_get_new_plane_state(struct intel_atomic_state *state,
struct intel_plane *plane)
@ -1444,6 +1523,7 @@ void icl_map_plls_to_ports(struct drm_crtc *crtc,
void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct drm_atomic_state *old_state);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
int color_plane, unsigned int height);
@ -1488,7 +1568,6 @@ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
@ -1509,20 +1588,12 @@ void intel_mark_idle(struct drm_i915_private *dev_priv);
int intel_display_suspend(struct drm_device *dev);
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
int intel_connector_init(struct intel_connector *);
struct intel_connector *intel_connector_alloc(void);
void intel_connector_free(struct intel_connector *connector);
bool intel_connector_get_hw_state(struct intel_connector *connector);
void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port);
bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
enum port port);
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
@ -1628,9 +1699,11 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void gen9_enable_dc5(struct drm_i915_private *dev_priv);
unsigned int skl_cdclk_get_vco(unsigned int freq);
void skl_enable_dc6(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
struct dpll *best_clock);
@ -1641,6 +1714,8 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
@ -1670,6 +1745,24 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
/* intel_connector.c */
int intel_connector_init(struct intel_connector *connector);
struct intel_connector *intel_connector_alloc(void);
void intel_connector_free(struct intel_connector *connector);
void intel_connector_destroy(struct drm_connector *connector);
int intel_connector_register(struct drm_connector *connector);
void intel_connector_unregister(struct drm_connector *connector);
void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
bool intel_connector_get_hw_state(struct intel_connector *connector);
enum pipe intel_connector_get_pipe(struct intel_connector *connector);
int intel_connector_update_modes(struct drm_connector *connector,
struct edid *edid);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
void intel_attach_force_audio_property(struct drm_connector *connector);
void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
void intel_attach_aspect_ratio_property(struct drm_connector *connector);
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
void intel_csr_load_program(struct drm_i915_private *);
@ -1728,9 +1821,6 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
void icl_program_mg_dp_mode(struct intel_dp *intel_dp);
void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port);
void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port);
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@ -1748,6 +1838,10 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
int mode_clock, int mode_hdisplay);
uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
int mode_hdisplay);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
@ -1768,6 +1862,9 @@ void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
/* vlv_dsi.c */
void vlv_dsi_init(struct drm_i915_private *dev_priv);
/* icl_dsi.c */
void icl_dsi_init(struct drm_i915_private *dev_priv);
/* intel_dsi_dcs_backlight.c */
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
@ -1858,7 +1955,6 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
/* intel_lvds.c */
bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t lvds_reg, enum pipe *pipe);
@ -1866,19 +1962,9 @@ void intel_lvds_init(struct drm_i915_private *dev_priv);
struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
bool intel_is_dual_link_lvds(struct drm_device *dev);
/* intel_modes.c */
int intel_connector_update_modes(struct drm_connector *connector,
struct edid *edid);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
void intel_attach_force_audio_property(struct drm_connector *connector);
void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
void intel_attach_aspect_ratio_property(struct drm_connector *connector);
/* intel_overlay.c */
void intel_setup_overlay(struct drm_i915_private *dev_priv);
void intel_cleanup_overlay(struct drm_i915_private *dev_priv);
void intel_overlay_setup(struct drm_i915_private *dev_priv);
void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
int intel_overlay_switch_off(struct intel_overlay *overlay);
int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@ -1907,7 +1993,6 @@ int intel_panel_setup_backlight(struct drm_connector *connector,
void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
void intel_panel_destroy_backlight(struct drm_connector *connector);
extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_i915_private *dev_priv,
struct drm_display_mode *fixed_mode,
@ -1936,6 +2021,7 @@ int intel_hdcp_enable(struct intel_connector *connector);
int intel_hdcp_disable(struct intel_connector *connector);
int intel_hdcp_check_link(struct intel_connector *connector);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
bool intel_hdcp_capable(struct intel_connector *connector);
/* intel_psr.c */
#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
@ -1962,11 +2048,16 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp);
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
u32 *out_value);
/* intel_quirks.c */
void intel_init_quirks(struct drm_i915_private *dev_priv);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
void icl_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_power_domains_enable(struct drm_i915_private *dev_priv);
void intel_power_domains_disable(struct drm_i915_private *dev_priv);
@ -2101,10 +2192,9 @@ int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2);
bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry **entries,
const struct skl_ddb_entry *ddb,
int ignore);
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
const struct skl_ddb_entry entries[],
int num_entries, int ignore_idx);
bool ilk_disable_lp_wm(struct drm_device *dev);
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
struct intel_crtc_state *cstate);
@ -2127,23 +2217,29 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
void skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe);
bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
unsigned int skl_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
int skl_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
int intel_plane_check_stride(const struct intel_plane_state *plane_state);
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
struct intel_plane *
skl_universal_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
static inline bool icl_is_nv12_y_plane(enum plane_id id)
{
/* Don't need to do a gen check, these planes are only available on gen11 */
if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
return true;
return false;
}
static inline bool icl_is_hdr_plane(struct intel_plane *plane)
{
if (INTEL_GEN(to_i915(plane->base.dev)) < 11)
return false;
return plane->id < PLANE_SPRITE2;
}
/* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv);
@ -2185,11 +2281,16 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state);
/* intel_atomic_plane.c */
struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
struct intel_plane *intel_plane_alloc(void);
void intel_plane_free(struct intel_plane *plane);
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
void intel_update_planes_on_crtc(struct intel_atomic_state *old_state,
struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state);
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
@ -2205,6 +2306,18 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state);
bool lspcon_init(struct intel_digital_port *intel_dig_port);
void lspcon_resume(struct intel_lspcon *lspcon);
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
void lspcon_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *buf, ssize_t len);
void lspcon_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
void lspcon_ycbcr420_config(struct drm_connector *connector,
struct intel_crtc_state *crtc_state);
/* intel_pipe_crc.c */
#ifdef CONFIG_DEBUG_FS


@ -0,0 +1,128 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2018 Intel Corporation
*/
#include <drm/drm_mipi_dsi.h>
#include "intel_dsi.h"
int intel_dsi_bitrate(const struct intel_dsi *intel_dsi)
{
int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
if (WARN_ON(bpp < 0))
bpp = 16;
return intel_dsi->pclk * bpp / intel_dsi->lane_count;
}
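/*
* Worked example with assumed panel numbers: a 148500 kHz pixel clock,
* RGB888 (24 bpp) and 4 lanes give 148500 * 24 / 4 = 891000 kbps per lane.
*/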
int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi)
{
switch (intel_dsi->escape_clk_div) {
default:
case 0:
return 50;
case 1:
return 100;
case 2:
return 200;
}
}
int intel_dsi_get_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *mode;
DRM_DEBUG_KMS("\n");
if (!intel_connector->panel.fixed_mode) {
DRM_DEBUG_KMS("no fixed mode\n");
return 0;
}
mode = drm_mode_duplicate(connector->dev,
intel_connector->panel.fixed_mode);
if (!mode) {
DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
return 0;
}
drm_mode_probed_add(connector, mode);
return 1;
}
enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
DRM_DEBUG_KMS("\n");
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
if (fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
if (fixed_mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
}
return MODE_OK;
}
struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
const struct mipi_dsi_host_ops *funcs,
enum port port)
{
struct intel_dsi_host *host;
struct mipi_dsi_device *device;
host = kzalloc(sizeof(*host), GFP_KERNEL);
if (!host)
return NULL;
host->base.ops = funcs;
host->intel_dsi = intel_dsi;
host->port = port;
/*
* We should call mipi_dsi_host_register(&host->base) here, but we don't
* have a host->dev, and we don't have OF stuff either. So just use the
* dsi framework as a library and hope for the best. Create the dsi
* devices by ourselves here too. Need to be careful though, because we
* don't initialize any of the driver model devices here.
*/
device = kzalloc(sizeof(*device), GFP_KERNEL);
if (!device) {
kfree(host);
return NULL;
}
device->host = &host->base;
host->device = device;
return host;
}
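/*
* Usage sketch (the ops structure and dsi_hosts member are assumptions of
* this example, not mandated by the helper): a DSI encoder would typically
* call this once per port, e.g.
*
*	intel_dsi->dsi_hosts[port] =
*		intel_dsi_host_init(intel_dsi, &example_host_ops, port);
*
* with example_host_ops supplying the .attach/.detach/.transfer hooks.
*/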
enum drm_panel_orientation
intel_dsi_get_panel_orientation(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum drm_panel_orientation orientation;
orientation = dev_priv->vbt.dsi.orientation;
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
orientation = dev_priv->vbt.orientation;
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
return DRM_MODE_PANEL_ORIENTATION_NORMAL;
}


@ -81,14 +81,21 @@ struct intel_dsi {
u16 dcs_backlight_ports;
u16 dcs_cabc_ports;
/* RGB or BGR */
bool bgr_enabled;
u8 pixel_overlap;
u32 port_bits;
u32 bw_timer;
u32 dphy_reg;
/* data lanes dphy timing */
u32 dphy_data_lane_reg;
u32 video_frmt_cfg_bits;
u16 lp_byte_clk;
/* timeouts in byte clocks */
u16 hs_tx_timeout;
u16 lp_rx_timeout;
u16 turn_arnd_val;
u16 rst_timer_val;
@ -129,9 +136,31 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
return container_of(encoder, struct intel_dsi, base.base);
}
static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
{
return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
}
static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
{
return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
}
/* intel_dsi.c */
int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi);
enum drm_panel_orientation
intel_dsi_get_panel_orientation(struct intel_connector *connector);
/* vlv_dsi.c */
void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
int intel_dsi_get_modes(struct drm_connector *connector);
enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode);
struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
const struct mipi_dsi_host_ops *funcs,
enum port port);
/* vlv_dsi_pll.c */
int vlv_dsi_pll_compute(struct intel_encoder *encoder,
@ -158,5 +187,6 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
#endif /* _INTEL_DSI_H */


@ -111,6 +111,7 @@ static inline enum port intel_dsi_seq_port_to_port(u8 port)
static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
const u8 *data)
{
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
struct mipi_dsi_device *dsi_device;
u8 type, flags, seq_port;
u16 len;
@ -181,7 +182,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
break;
}
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
if (!IS_ICELAKE(dev_priv))
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
data += len;
@ -481,6 +483,17 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
}
}
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
{
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
/* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
return;
msleep(msec);
}
int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
{
struct intel_connector *connector = intel_dsi->attached_connector;
@ -499,110 +512,125 @@ int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
return 1;
}
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
#define ICL_PREPARE_CNT_MAX 0x7
#define ICL_CLK_ZERO_CNT_MAX 0xf
#define ICL_TRAIL_CNT_MAX 0x7
#define ICL_TCLK_PRE_CNT_MAX 0x3
#define ICL_TCLK_POST_CNT_MAX 0x7
#define ICL_HS_ZERO_CNT_MAX 0xf
#define ICL_EXIT_ZERO_CNT_MAX 0x7
static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
u32 bpp;
u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
u32 tlpx_ns;
u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
u32 ths_prepare_ns, tclk_trail_ns;
u32 hs_zero_cnt;
u32 tclk_pre_cnt, tclk_post_cnt;
tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
ths_prepare_ns = max(mipi_config->ths_prepare,
mipi_config->tclk_prepare);
/*
* prepare cnt in escape clocks
* this field is a fixed-point value in 1.2 format, i.e. the most
* significant bit is the integer part and the least significant 2 bits
* are fraction bits, so the field can represent a range of 0.25 to 1.75
*/
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt);
prepare_cnt = ICL_PREPARE_CNT_MAX;
}
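/*
* Example with assumed VBT numbers: ths_prepare_ns = 60 and tlpx_ns = 50
* give DIV_ROUND_UP(60 * 4, 50) = 5, i.e. 0b101 in 1.2 fixed point, which
* is 1.25 escape clocks.
*/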
/* clk zero count in escape clocks */
clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
ths_prepare_ns, tlpx_ns);
if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
}
/* trail cnt in escape clocks*/
trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
if (trail_cnt > ICL_TRAIL_CNT_MAX) {
DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt);
trail_cnt = ICL_TRAIL_CNT_MAX;
}
/* tclk pre count in escape clocks */
tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
}
/* tclk post count in escape clocks */
tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt);
tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
}
/* hs zero cnt in escape clocks */
hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
ths_prepare_ns, tlpx_ns);
if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt);
hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
}
/* hs exit zero cnt in escape clocks */
exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt);
exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
}
/* clock lane dphy timings */
intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
CLK_PREPARE(prepare_cnt) |
CLK_ZERO_OVERRIDE |
CLK_ZERO(clk_zero_cnt) |
CLK_PRE_OVERRIDE |
CLK_PRE(tclk_pre_cnt) |
CLK_POST_OVERRIDE |
CLK_POST(tclk_post_cnt) |
CLK_TRAIL_OVERRIDE |
CLK_TRAIL(trail_cnt));
/* data lanes dphy timings */
intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
HS_PREPARE(prepare_cnt) |
HS_ZERO_OVERRIDE |
HS_ZERO(hs_zero_cnt) |
HS_TRAIL_OVERRIDE |
HS_TRAIL(trail_cnt) |
HS_EXIT_OVERRIDE |
HS_EXIT(exit_zero_cnt));
}
static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
u32 tlpx_ns, extra_byte_count, tlpx_ui;
u32 ui_num, ui_den;
u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
u32 ths_prepare_ns, tclk_trail_ns;
u32 tclk_prepare_clkzero, ths_prepare_hszero;
u32 lp_to_hs_switch, hs_to_lp_switch;
u32 pclk, computed_ddr;
u32 mul;
u16 burst_mode_ratio;
enum port port;
DRM_DEBUG_KMS("\n");
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lane_count = mipi_config->lane_cnt + 1;
intel_dsi->pixel_format =
pixel_format_from_register_bits(
mipi_config->videomode_color_format << 7);
bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
intel_dsi->dual_link = mipi_config->dual_link;
intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
intel_dsi->operation_mode = mipi_config->is_cmd_mode;
intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
intel_dsi->init_count = mipi_config->master_init_timer;
intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
intel_dsi->video_frmt_cfg_bits =
mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
pclk = mode->clock;
/* In dual link mode each port needs half of pixel clock */
if (intel_dsi->dual_link) {
pclk = pclk / 2;
/* we can enable pixel_overlap if needed by the panel. In this
* case we need to increase the pixel clock for the extra pixels
*/
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
pclk += DIV_ROUND_UP(mode->vtotal *
intel_dsi->pixel_overlap *
60, 1000);
}
}
/* Burst Mode Ratio
* Target ddr frequency from VBT / non burst ddr freq
* multiply by 100 to preserve remainder
*/
if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
if (mipi_config->target_burst_mode_freq) {
computed_ddr = (pclk * bpp) / intel_dsi->lane_count;
if (mipi_config->target_burst_mode_freq <
computed_ddr) {
DRM_ERROR("Burst mode freq is less than computed\n");
return false;
}
burst_mode_ratio = DIV_ROUND_UP(
mipi_config->target_burst_mode_freq * 100,
computed_ddr);
pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
} else {
DRM_ERROR("Burst mode target is not set\n");
return false;
}
} else
burst_mode_ratio = 100;
intel_dsi->burst_mode_ratio = burst_mode_ratio;
intel_dsi->pclk = pclk;
bitrate = (pclk * bpp) / intel_dsi->lane_count;
switch (intel_dsi->escape_clk_div) {
case 0:
tlpx_ns = 50;
break;
case 1:
tlpx_ns = 100;
break;
case 2:
tlpx_ns = 200;
break;
default:
tlpx_ns = 50;
break;
}
tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
switch (intel_dsi->lane_count) {
case 1:
@@ -620,7 +648,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
/* in Kbps */
ui_num = NS_KHZ_RATIO;
ui_den = bitrate;
ui_den = intel_dsi_bitrate(intel_dsi);
tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
ths_prepare_hszero = mipi_config->ths_prepare_hszero;
@@ -746,6 +774,88 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
8);
intel_dsi->clk_hs_to_lp_count += extra_byte_count;
}
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
u16 burst_mode_ratio;
enum port port;
DRM_DEBUG_KMS("\n");
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lane_count = mipi_config->lane_cnt + 1;
intel_dsi->pixel_format =
pixel_format_from_register_bits(
mipi_config->videomode_color_format << 7);
intel_dsi->dual_link = mipi_config->dual_link;
intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
intel_dsi->operation_mode = mipi_config->is_cmd_mode;
intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
intel_dsi->hs_tx_timeout = mipi_config->hs_tx_timeout;
intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
intel_dsi->init_count = mipi_config->master_init_timer;
intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
intel_dsi->video_frmt_cfg_bits =
mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
intel_dsi->bgr_enabled = mipi_config->rgb_flip;
/* Starting point, adjusted depending on dual link and burst mode */
intel_dsi->pclk = mode->clock;
/* In dual link mode each port needs half of pixel clock */
if (intel_dsi->dual_link) {
intel_dsi->pclk /= 2;
/* we can enable pixel_overlap if needed by the panel. In this
* case we need to increase the pixel clock for the extra pixels
*/
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
intel_dsi->pclk += DIV_ROUND_UP(mode->vtotal * intel_dsi->pixel_overlap * 60, 1000);
}
}
/* Burst Mode Ratio
* Target ddr frequency from VBT / non burst ddr freq
* multiply by 100 to preserve remainder
*/
if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
if (mipi_config->target_burst_mode_freq) {
u32 bitrate = intel_dsi_bitrate(intel_dsi);
if (mipi_config->target_burst_mode_freq < bitrate) {
DRM_ERROR("Burst mode freq is less than computed\n");
return false;
}
burst_mode_ratio = DIV_ROUND_UP(
mipi_config->target_burst_mode_freq * 100,
bitrate);
intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
} else {
DRM_ERROR("Burst mode target is not set\n");
return false;
}
} else
burst_mode_ratio = 100;
intel_dsi->burst_mode_ratio = burst_mode_ratio;
if (IS_ICELAKE(dev_priv))
icl_dphy_param_init(intel_dsi);
else
vlv_dphy_param_init(intel_dsi);
DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);

View file

@@ -256,6 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
return true;
}
@@ -333,18 +334,11 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
return 0;
}
static void intel_dvo_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
intel_panel_fini(&to_intel_connector(connector)->panel);
kfree(connector);
}
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.detect = intel_dvo_detect,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_dvo_destroy,
.destroy = intel_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,

View file

@@ -273,13 +273,13 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
return -EINVAL;
if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
return -EINVAL;
if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
return -EINVAL;
GEM_BUG_ON(dev_priv->engine[id]);
@@ -335,7 +335,10 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
WARN_ON(ring_mask == 0);
WARN_ON(ring_mask &
GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
if (i915_inject_load_failure())
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
if (!HAS_ENGINE(dev_priv, i))
@@ -399,7 +402,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
err = -EINVAL;
err_id = id;
if (GEM_WARN_ON(!init))
if (GEM_DEBUG_WARN_ON(!init))
goto cleanup;
err = init(engine);
@@ -463,7 +466,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
struct intel_engine_execlists * const execlists = &engine->execlists;
execlists->port_mask = 1;
BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
execlists->queue_priority = INT_MIN;
@@ -482,7 +485,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
i915_timeline_init(engine->i915, &engine->timeline, engine->name);
lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE);
i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
intel_engine_init_execlist(engine);
intel_engine_init_hangcheck(engine);
@@ -809,7 +812,7 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
u32 slice = fls(sseu->slice_mask);
u32 subslice = fls(sseu->subslice_mask[slice]);
if (INTEL_GEN(dev_priv) == 10)
if (IS_GEN10(dev_priv))
mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
GEN8_MCR_SUBSLICE(subslice);
else if (INTEL_GEN(dev_priv) >= 11)
@@ -1534,10 +1537,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
count = 0;
drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
struct i915_priolist *p =
rb_entry(rb, typeof(*p), node);
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
int i;
list_for_each_entry(rq, &p->requests, sched.link) {
priolist_for_each_request(rq, p, i) {
if (count++ < MAX_REQUESTS_TO_SHOW - 1)
print_request(m, rq, "\t\tQ ");
else
@@ -1559,8 +1562,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
drm_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
drm_printf(m, "\t%s [%d:%c] waiting for %x\n",
w->tsk->comm, w->tsk->pid,
task_state_to_char(w->tsk),
w->seqno);
}
spin_unlock(&b->rb_lock);
local_irq_restore(flags);

View file

@@ -84,7 +84,7 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
int lines;
intel_fbc_get_plane_source_size(cache, NULL, &lines);
if (INTEL_GEN(dev_priv) == 7)
if (IS_GEN7(dev_priv))
lines = min(lines, 2048);
else if (INTEL_GEN(dev_priv) >= 8)
lines = min(lines, 2560);
@@ -674,6 +674,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->plane.adjusted_y = plane_state->color_plane[0].y;
cache->plane.y = plane_state->base.src.y1 >> 16;
cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode;
if (!cache->plane.visible)
return;
@@ -748,6 +750,12 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
cache->fb.format->has_alpha) {
fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
return false;
}
/* WaFbcExceedCdClockThreshold:hsw,bdw */
if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {

View file

@@ -593,7 +593,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
* pipe. Note we need to use the selected fb's pitch and bpp
* rather than the current pipe's, since they differ.
*/
cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay;
cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.format->cpp[0];
if (fb->base.pitches[0] < cur_size) {
DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
@@ -603,13 +603,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
break;
}
cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
intel_crtc->config->base.adjusted_mode.crtc_hdisplay,
intel_crtc->config->base.adjusted_mode.crtc_vdisplay,
crtc->state->adjusted_mode.crtc_hdisplay,
crtc->state->adjusted_mode.crtc_vdisplay,
fb->base.format->cpp[0] * 8,
cur_size);

View file

@@ -50,7 +50,8 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
unsigned int i;
guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;
guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
for (i = 0; i < guc->send_regs.count; i++) {
fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
@@ -521,6 +522,44 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/*
* The ENTER/EXIT_S_STATE actions queue the save/restore operation in GuC FW and
* then return, so waiting on the H2G is not enough to guarantee GuC is done.
* When all the processing is done, GuC writes INTEL_GUC_SLEEP_STATE_SUCCESS to
* scratch register 14, so we can poll on that. Note that GuC does not ensure
* that the value in the register is different from
* INTEL_GUC_SLEEP_STATE_SUCCESS while the action is in progress so we need to
* take care of that ourselves as well.
*/
static int guc_sleep_state_action(struct intel_guc *guc,
const u32 *action, u32 len)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
int ret;
u32 status;
I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);
ret = intel_guc_send(guc, action, len);
if (ret)
return ret;
ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14),
INTEL_GUC_SLEEP_STATE_INVALID_MASK,
0, 0, 10, &status);
if (ret)
return ret;
if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
DRM_ERROR("GuC failed to change sleep state. "
"action=0x%x, err=%u\n",
action[0], status);
return -EIO;
}
return 0;
}
/**
* intel_guc_suspend() - notify GuC entering suspend state
* @guc: the guc
@@ -533,7 +572,7 @@ int intel_guc_suspend(struct intel_guc *guc)
intel_guc_ggtt_offset(guc, guc->shared_data)
};
return intel_guc_send(guc, data, ARRAY_SIZE(data));
return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
}
/**
@@ -571,7 +610,7 @@ int intel_guc_resume(struct intel_guc *guc)
intel_guc_ggtt_offset(guc, guc->shared_data)
};
return intel_guc_send(guc, data, ARRAY_SIZE(data));
return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
}
/**

View file

@@ -95,6 +95,11 @@ struct intel_guc {
void (*notify)(struct intel_guc *guc);
};
static inline bool intel_guc_is_alive(struct intel_guc *guc)
{
return intel_uc_fw_is_loaded(&guc->fw);
}
static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{

View file

@@ -78,7 +78,8 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw)
guc_fw->major_ver_wanted = KBL_FW_MAJOR;
guc_fw->minor_ver_wanted = KBL_FW_MINOR;
} else {
DRM_WARN("%s: No firmware known for this platform!\n",
dev_info(dev_priv->drm.dev,
"%s: No firmware known for this platform!\n",
intel_uc_fw_type_repr(guc_fw->type));
}
}
@@ -125,66 +126,26 @@ static void guc_prepare_xfer(struct intel_guc *guc)
}
/* Copy RSA signature from the fw image to HW for verification */
static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
static void guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_uc_fw *guc_fw = &guc->fw;
struct sg_table *sg = vma->pages;
u32 rsa[UOS_RSA_SCRATCH_COUNT];
int i;
if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa),
guc_fw->rsa_offset) != sizeof(rsa))
return -EINVAL;
sg_pcopy_to_buffer(vma->pages->sgl, vma->pages->nents,
rsa, sizeof(rsa), guc->fw.rsa_offset);
for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
return 0;
}
/*
* Transfer the firmware image to RAM for execution by the microcontroller.
*
* Architecturally, the DMA engine is bidirectional, and can potentially even
* transfer between GTT locations. This functionality is left out of the API
* for now as there is no need for it.
*/
static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
static bool guc_xfer_completed(struct intel_guc *guc, u32 *status)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_uc_fw *guc_fw = &guc->fw;
unsigned long offset;
u32 status;
int ret;
/*
* The header plus uCode will be copied to WOPCM via DMA, excluding any
* other components
*/
I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
/* Set the source address for the new blob */
offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
/*
* Set the DMA destination. Current uCode expects the code to be
* loaded at 8k; locations below this are used for the stack.
*/
I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
/* Finally start the DMA */
I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
/* Wait for DMA to finish */
ret = __intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0,
2, 100, &status);
DRM_DEBUG_DRIVER("GuC DMA status %#x\n", status);
return ret;
/* Did we complete the xfer? */
*status = I915_READ(DMA_CTRL);
return !(*status & START_DMA);
}
/*
@@ -217,8 +178,8 @@ static int guc_wait_ucode(struct intel_guc *guc)
* NB: Docs recommend not using the interrupt for completion.
* Measurements indicate this should take no more than 20ms, so a
* timeout here indicates that the GuC has failed and is unusable.
* (Higher levels of the driver will attempt to fall back to
* execlist mode if this happens.)
* (Higher levels of the driver may decide to reset the GuC and
* attempt the ucode load again if this happens.)
*/
ret = wait_for(guc_ready(guc, &status), 100);
DRM_DEBUG_DRIVER("GuC status %#x\n", status);
@@ -228,9 +189,51 @@ static int guc_wait_ucode(struct intel_guc *guc)
ret = -ENOEXEC;
}
if (ret == 0 && !guc_xfer_completed(guc, &status)) {
DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n",
status);
ret = -ENXIO;
}
return ret;
}
/*
* Transfer the firmware image to RAM for execution by the microcontroller.
*
* Architecturally, the DMA engine is bidirectional, and can potentially even
* transfer between GTT locations. This functionality is left out of the API
* for now as there is no need for it.
*/
static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_uc_fw *guc_fw = &guc->fw;
unsigned long offset;
/*
* The header plus uCode will be copied to WOPCM via DMA, excluding any
* other components
*/
I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
/* Set the source address for the new blob */
offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
/*
* Set the DMA destination. Current uCode expects the code to be
* loaded at 8k; locations below this are used for the stack.
*/
I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
/* Finally start the DMA */
I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
return guc_wait_ucode(guc);
}
/*
* Load the GuC firmware blob into the MinuteIA.
*/
@@ -251,17 +254,9 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
* by the DMA engine in one operation, whereas the RSA signature is
* loaded via MMIO.
*/
ret = guc_xfer_rsa(guc, vma);
if (ret)
DRM_WARN("GuC firmware signature xfer error %d\n", ret);
guc_xfer_rsa(guc, vma);
ret = guc_xfer_ucode(guc, vma);
if (ret)
DRM_WARN("GuC firmware code xfer error %d\n", ret);
ret = guc_wait_ucode(guc);
if (ret)
DRM_ERROR("GuC firmware xfer error %d\n", ret);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

View file

@@ -39,6 +39,11 @@
#define GUC_VIDEO_ENGINE2 4
#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
#define GUC_DOORBELL_INVALID 256
#define GUC_DB_SIZE (PAGE_SIZE)
#define GUC_WQ_SIZE (PAGE_SIZE * 2)
/* Work queue item header definitions */
#define WQ_STATUS_ACTIVE 1
#define WQ_STATUS_SUSPENDED 2
@@ -59,9 +64,6 @@
#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
#define GUC_DOORBELL_ENABLED 1
#define GUC_DOORBELL_DISABLED 0
#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0)
#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1)
#define GUC_STAGE_DESC_ATTR_KERNEL BIT(2)
@@ -219,26 +221,6 @@ struct uc_css_header {
u32 header_info;
} __packed;
struct guc_doorbell_info {
u32 db_status;
u32 cookie;
u32 reserved[14];
} __packed;
union guc_doorbell_qw {
struct {
u32 db_status;
u32 cookie;
};
u64 value_qw;
} __packed;
#define GUC_NUM_DOORBELLS 256
#define GUC_DOORBELL_INVALID (GUC_NUM_DOORBELLS)
#define GUC_DB_SIZE (PAGE_SIZE)
#define GUC_WQ_SIZE (PAGE_SIZE * 2)
/* Work item for submitting workloads into work queue of GuC. */
struct guc_wq_item {
u32 header;
@@ -601,7 +583,9 @@ struct guc_shared_ctx_data {
* registers, where first register holds data treated as message header,
* and other registers are used to hold message payload.
*
* For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8
* For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8,
* but no H2G command takes more than 8 parameters and the GuC FW
* itself uses an 8-element array to store the H2G message.
*
* +-----------+---------+---------+---------+
* | MMIO[0] | MMIO[1] | ... | MMIO[n] |
@@ -633,6 +617,8 @@ struct guc_shared_ctx_data {
* field.
*/
#define GUC_MAX_MMIO_MSG_LEN 8
#define INTEL_GUC_MSG_TYPE_SHIFT 28
#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT)
#define INTEL_GUC_MSG_DATA_SHIFT 16
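The TYPE/DATA defines describe how the first scratch register of an H2G message is split into fields. Below is a small sketch of packing and unpacking such a header word; the layout follows the defines above, but the 12-bit width assumed for the data mask and the field values themselves are purely illustrative.

#include <stdio.h>

#define INTEL_GUC_MSG_TYPE_SHIFT  28
#define INTEL_GUC_MSG_TYPE_MASK   (0xFu << INTEL_GUC_MSG_TYPE_SHIFT)
#define INTEL_GUC_MSG_DATA_SHIFT  16
#define EXAMPLE_MSG_DATA_MASK     (0xFFFu << INTEL_GUC_MSG_DATA_SHIFT) /* width assumed */

int main(void)
{
        unsigned int type = 0x1;   /* illustrative message type */
        unsigned int data = 0x123; /* illustrative payload bits */
        unsigned int msg;

        /* pack the header word, then pull the fields back out */
        msg  = (type << INTEL_GUC_MSG_TYPE_SHIFT) & INTEL_GUC_MSG_TYPE_MASK;
        msg |= (data << INTEL_GUC_MSG_DATA_SHIFT) & EXAMPLE_MSG_DATA_MASK;

        printf("header=0x%08x type=%u data=0x%03x\n", msg,
               (msg & INTEL_GUC_MSG_TYPE_MASK) >> INTEL_GUC_MSG_TYPE_SHIFT,
               (msg & EXAMPLE_MSG_DATA_MASK) >> INTEL_GUC_MSG_DATA_SHIFT);
        return 0;
}

The GUC_MAX_MMIO_MSG_LEN define above is what intel_guc_init_send_regs() now uses instead of the full scratch register count, matching the comment that no H2G command carries more than 8 parameters.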
@@ -687,6 +673,13 @@ enum intel_guc_report_status {
INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
};
enum intel_guc_sleep_state_status {
INTEL_GUC_SLEEP_STATE_SUCCESS = 0x0,
INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x1,
INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x2
#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
};
#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0)
#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4
#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)

View file

@@ -104,6 +104,18 @@
#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
#define GUC_SEND_TRIGGER (1<<0)
#define GUC_NUM_DOORBELLS 256
/* format of the HW-monitored doorbell cacheline */
struct guc_doorbell_info {
u32 db_status;
#define GUC_DOORBELL_DISABLED 0
#define GUC_DOORBELL_ENABLED 1
u32 cookie;
u32 reserved[14];
} __packed;
#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8)
#define GEN8_DRB_VALID (1<<0)
#define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4)

View file

@@ -192,7 +192,15 @@ static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
return client->vaddr + client->doorbell_offset;
}
static void __create_doorbell(struct intel_guc_client *client)
static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
}
static void __init_doorbell(struct intel_guc_client *client)
{
struct guc_doorbell_info *doorbell;
@@ -201,21 +209,19 @@ static void __create_doorbell(struct intel_guc_client *client)
doorbell->cookie = 0;
}
static void __destroy_doorbell(struct intel_guc_client *client)
static void __fini_doorbell(struct intel_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
struct guc_doorbell_info *doorbell;
u16 db_id = client->doorbell_id;
doorbell = __get_doorbell(client);
doorbell->db_status = GUC_DOORBELL_DISABLED;
doorbell->cookie = 0;
/* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
* to go to zero after updating db_status before we call the GuC to
* release the doorbell
*/
if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10))
if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10))
WARN_ONCE(true, "Doorbell never became invalid after disable\n");
}
@@ -227,11 +233,11 @@ static int create_doorbell(struct intel_guc_client *client)
return -ENODEV; /* internal setup error, should never happen */
__update_doorbell_desc(client, client->doorbell_id);
__create_doorbell(client);
__init_doorbell(client);
ret = __guc_allocate_doorbell(client->guc, client->stage_id);
if (ret) {
__destroy_doorbell(client);
__fini_doorbell(client);
__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
client->stage_id, ret);
@@ -247,7 +253,7 @@ static int destroy_doorbell(struct intel_guc_client *client)
GEM_BUG_ON(!has_doorbell(client));
__destroy_doorbell(client);
__fini_doorbell(client);
ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
if (ret)
DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
@@ -282,8 +288,7 @@ __get_process_desc(struct intel_guc_client *client)
/*
* Initialise the process descriptor shared with the GuC firmware.
*/
static void guc_proc_desc_init(struct intel_guc *guc,
struct intel_guc_client *client)
static void guc_proc_desc_init(struct intel_guc_client *client)
{
struct guc_process_desc *desc;
@@ -304,6 +309,14 @@ static void guc_proc_desc_init(struct intel_guc *guc,
desc->priority = client->priority;
}
static void guc_proc_desc_fini(struct intel_guc_client *client)
{
struct guc_process_desc *desc;
desc = __get_process_desc(client);
memset(desc, 0, sizeof(*desc));
}
static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
struct i915_vma *vma;
@@ -341,9 +354,9 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
* data structures relating to this client (doorbell, process descriptor,
* write queue, etc).
*/
static void guc_stage_desc_init(struct intel_guc *guc,
struct intel_guc_client *client)
static void guc_stage_desc_init(struct intel_guc_client *client)
{
struct intel_guc *guc = client->guc;
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *engine;
struct i915_gem_context *ctx = client->owner;
@@ -424,8 +437,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
desc->desc_private = ptr_to_u64(client);
}
static void guc_stage_desc_fini(struct intel_guc *guc,
struct intel_guc_client *client)
static void guc_stage_desc_fini(struct intel_guc_client *client)
{
struct guc_stage_desc *desc;
@@ -486,14 +498,6 @@ static void guc_wq_item_append(struct intel_guc_client *client,
WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}
static void guc_reset_wq(struct intel_guc_client *client)
{
struct guc_process_desc *desc = __get_process_desc(client);
desc->head = 0;
desc->tail = 0;
}
static void guc_ring_doorbell(struct intel_guc_client *client)
{
struct guc_doorbell_info *db;
@@ -746,30 +750,28 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
int i;
list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
priolist_for_each_request_consume(rq, rn, p, i) {
if (last && rq->hw_context != last->hw_context) {
if (port == last_port) {
__list_del_many(&p->requests,
&rq->sched.link);
if (port == last_port)
goto done;
}
if (submit)
port_assign(port, last);
port++;
}
INIT_LIST_HEAD(&rq->sched.link);
list_del_init(&rq->sched.link);
__i915_request_submit(rq);
trace_i915_request_in(rq, port_index(port, execlists));
last = rq;
submit = true;
}
rb_erase_cached(&p->node, &execlists->queue);
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
@@ -791,19 +793,8 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
static void guc_dequeue(struct intel_engine_cs *engine)
{
unsigned long flags;
bool submit;
local_irq_save(flags);
spin_lock(&engine->timeline.lock);
submit = __guc_dequeue(engine);
spin_unlock(&engine->timeline.lock);
if (submit)
if (__guc_dequeue(engine))
guc_submit(engine);
local_irq_restore(flags);
}
static void guc_submission_tasklet(unsigned long data)
@@ -812,6 +803,9 @@ static void guc_submission_tasklet(unsigned long data)
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
struct i915_request *rq;
unsigned long flags;
spin_lock_irqsave(&engine->timeline.lock, flags);
rq = port_request(port);
while (rq && i915_request_completed(rq)) {
@@ -835,6 +829,8 @@ static void guc_submission_tasklet(unsigned long data)
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
guc_dequeue(engine);
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static struct i915_request *
@@ -877,72 +873,31 @@ guc_reset_prepare(struct intel_engine_cs *engine)
/* Check that a doorbell register is in the expected state */
static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
u32 drbregl;
bool valid;
GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
drbregl = I915_READ(GEN8_DRBREGL(db_id));
valid = drbregl & GEN8_DRB_VALID;
valid = __doorbell_valid(guc, db_id);
if (test_bit(db_id, guc->doorbell_bitmap) == valid)
return true;
DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n",
db_id, drbregl, yesno(valid));
DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n",
db_id, yesno(valid));
return false;
}
static bool guc_verify_doorbells(struct intel_guc *guc)
{
bool doorbells_ok = true;
u16 db_id;
for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
if (!doorbell_ok(guc, db_id))
return false;
doorbells_ok = false;
return true;
}
static int guc_clients_doorbell_init(struct intel_guc *guc)
{
int ret;
ret = create_doorbell(guc->execbuf_client);
if (ret)
return ret;
if (guc->preempt_client) {
ret = create_doorbell(guc->preempt_client);
if (ret) {
destroy_doorbell(guc->execbuf_client);
return ret;
}
}
return 0;
}
static void guc_clients_doorbell_fini(struct intel_guc *guc)
{
/*
* By the time we're here, GuC has already been reset.
* Instead of trying (in vain) to communicate with it, let's just
* cleanup the doorbell HW and our internal state.
*/
if (guc->preempt_client) {
__destroy_doorbell(guc->preempt_client);
__update_doorbell_desc(guc->preempt_client,
GUC_DOORBELL_INVALID);
}
if (guc->execbuf_client) {
__destroy_doorbell(guc->execbuf_client);
__update_doorbell_desc(guc->execbuf_client,
GUC_DOORBELL_INVALID);
}
return doorbells_ok;
}
/**
@@ -1005,6 +960,10 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
}
client->vaddr = vaddr;
ret = reserve_doorbell(client);
if (ret)
goto err_vaddr;
client->doorbell_offset = __select_cacheline(guc);
/*
@@ -1017,13 +976,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
else
client->proc_desc_offset = (GUC_DB_SIZE / 2);
guc_proc_desc_init(guc, client);
guc_stage_desc_init(guc, client);
ret = reserve_doorbell(client);
if (ret)
goto err_vaddr;
DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
priority, client, client->engines, client->stage_id);
DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
@@ -1045,7 +997,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
static void guc_client_free(struct intel_guc_client *client)
{
unreserve_doorbell(client);
guc_stage_desc_fini(client->guc, client);
i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
ida_simple_remove(&client->guc->stage_ids, client->stage_id);
kfree(client);
@@ -1112,6 +1063,69 @@ static void guc_clients_destroy(struct intel_guc *guc)
guc_client_free(client);
}
static int __guc_client_enable(struct intel_guc_client *client)
{
int ret;
guc_proc_desc_init(client);
guc_stage_desc_init(client);
ret = create_doorbell(client);
if (ret)
goto fail;
return 0;
fail:
guc_stage_desc_fini(client);
guc_proc_desc_fini(client);
return ret;
}
static void __guc_client_disable(struct intel_guc_client *client)
{
/*
* By the time we're here, GuC may have already been reset. If that is
* the case, instead of trying (in vain) to communicate with it, let's
* just clean up the doorbell HW and our internal state.
*/
if (intel_guc_is_alive(client->guc))
destroy_doorbell(client);
else
__fini_doorbell(client);
guc_stage_desc_fini(client);
guc_proc_desc_fini(client);
}
static int guc_clients_enable(struct intel_guc *guc)
{
int ret;
ret = __guc_client_enable(guc->execbuf_client);
if (ret)
return ret;
if (guc->preempt_client) {
ret = __guc_client_enable(guc->preempt_client);
if (ret) {
__guc_client_disable(guc->execbuf_client);
return ret;
}
}
return 0;
}
static void guc_clients_disable(struct intel_guc *guc)
{
if (guc->preempt_client)
__guc_client_disable(guc->preempt_client);
if (guc->execbuf_client)
__guc_client_disable(guc->execbuf_client);
}
/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
@@ -1295,15 +1309,11 @@ int intel_guc_submission_enable(struct intel_guc *guc)
GEM_BUG_ON(!guc->execbuf_client);
guc_reset_wq(guc->execbuf_client);
if (guc->preempt_client)
guc_reset_wq(guc->preempt_client);
err = intel_guc_sample_forcewake(guc);
if (err)
return err;
err = guc_clients_doorbell_init(guc);
err = guc_clients_enable(guc);
if (err)
return err;
@@ -1325,7 +1335,7 @@ void intel_guc_submission_disable(struct intel_guc *guc)
GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
guc_interrupts_release(dev_priv);
guc_clients_doorbell_fini(guc);
guc_clients_disable(guc);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)

View file

@@ -16,6 +16,62 @@
#define KEY_LOAD_TRIES 5
static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
{
int i, ones = 0;
/* KSV has 20 1's and 20 0's */
for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
ones += hweight8(ksv[i]);
if (ones != 20)
return false;
return true;
}
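For reference, a quick standalone check of the "20 ones, 20 zeroes" rule, with a portable stand-in for the kernel's hweight8(); the sample KSV is made up (any 40-bit value with a popcount of 20 passes).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DRM_HDCP_KSV_LEN 5

static int hweight8(uint8_t v)          /* popcount of one byte */
{
        int ones = 0;

        while (v) {
                ones += v & 1;
                v >>= 1;
        }
        return ones;
}

static bool ksv_valid(const uint8_t *ksv)
{
        int i, ones = 0;

        for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
                ones += hweight8(ksv[i]);
        return ones == 20;
}

int main(void)
{
        /* made-up KSV: five 0x0f bytes contribute 4 ones each = 20 */
        const uint8_t ksv[DRM_HDCP_KSV_LEN] = { 0x0f, 0x0f, 0x0f, 0x0f, 0x0f };

        printf("ksv valid: %s\n", ksv_valid(ksv) ? "yes" : "no");
        return 0;
}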
static
int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
const struct intel_hdcp_shim *shim, u8 *bksv)
{
int ret, i, tries = 2;
/* HDCP spec states that we must retry the bksv if it is invalid */
for (i = 0; i < tries; i++) {
ret = shim->read_bksv(intel_dig_port, bksv);
if (ret)
return ret;
if (intel_hdcp_is_ksv_valid(bksv))
break;
}
if (i == tries) {
DRM_DEBUG_KMS("Bksv is invalid\n");
return -ENODEV;
}
return 0;
}
/* Is HDCP1.4 capable on Platform and Sink */
bool intel_hdcp_capable(struct intel_connector *connector)
{
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
const struct intel_hdcp_shim *shim = connector->hdcp.shim;
bool capable = false;
u8 bksv[5];
if (!shim)
return capable;
if (shim->hdcp_capable) {
shim->hdcp_capable(intel_dig_port, &capable);
} else {
if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
capable = true;
}
return capable;
}
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
const struct intel_hdcp_shim *shim)
{
@@ -167,18 +223,6 @@ u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
return -EINVAL;
}
static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
{
int i, ones = 0;
/* KSV has 20 1's and 20 0's */
for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
ones += hweight8(ksv[i]);
if (ones != 20)
return false;
return true;
}
static
int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
const struct intel_hdcp_shim *shim,
@@ -383,7 +427,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
HDCP_SHA1_COMPLETE,
HDCP_SHA1_COMPLETE, 1)) {
DRM_DEBUG_KMS("Timed out waiting for SHA1 complete\n");
DRM_ERROR("Timed out waiting for SHA1 complete\n");
return -ETIMEDOUT;
}
if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
@@ -404,7 +448,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
if (ret) {
DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
return ret;
}
@@ -414,7 +458,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
DRM_ERROR("Max Topology Limit Exceeded\n");
DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
return -EPERM;
}
@@ -450,7 +494,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
}
if (i == tries) {
DRM_ERROR("V Prime validation failed.(%d)\n", ret);
DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
goto err;
}
@@ -499,7 +543,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
if (ret)
return ret;
if (!hdcp_capable) {
DRM_ERROR("Panel is not HDCP capable\n");
DRM_DEBUG_KMS("Panel is not HDCP capable\n");
return -EINVAL;
}
}
@@ -527,18 +571,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
memset(&bksv, 0, sizeof(bksv));
/* HDCP spec states that we must retry the bksv if it is invalid */
for (i = 0; i < tries; i++) {
ret = shim->read_bksv(intel_dig_port, bksv.shim);
if (ret)
return ret;
if (intel_hdcp_is_ksv_valid(bksv.shim))
break;
}
if (i == tries) {
DRM_ERROR("HDCP failed, Bksv is invalid\n");
return -ENODEV;
}
ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
if (ret < 0)
return ret;
I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
@@ -594,8 +629,8 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
}
if (i == tries) {
DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
I915_READ(PORT_HDCP_STATUS(port)));
DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
I915_READ(PORT_HDCP_STATUS(port)));
return -ETIMEDOUT;
}
@@ -618,14 +653,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
return 0;
}
static
struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
{
return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
}
static int _intel_hdcp_disable(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
@@ -641,7 +671,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
return -ETIMEDOUT;
}
ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false);
ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
if (ret) {
DRM_ERROR("Failed to disable HDCP signalling\n");
return ret;
@@ -653,6 +683,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
static int _intel_hdcp_enable(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
int i, ret, tries = 3;
@@ -677,8 +708,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
/* In case of authentication failures, the HDCP spec expects reauth. */
for (i = 0; i < tries; i++) {
ret = intel_hdcp_auth(conn_to_dig_port(connector),
connector->hdcp_shim);
ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim);
if (!ret)
return 0;
@@ -688,42 +718,50 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
_intel_hdcp_disable(connector);
}
DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret);
DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
return ret;
}
static inline
struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
return container_of(hdcp, struct intel_connector, hdcp);
}
static void intel_hdcp_check_work(struct work_struct *work)
{
struct intel_connector *connector = container_of(to_delayed_work(work),
struct intel_connector,
hdcp_check_work);
struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
struct intel_hdcp,
check_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
if (!intel_hdcp_check_link(connector))
schedule_delayed_work(&connector->hdcp_check_work,
schedule_delayed_work(&hdcp->check_work,
DRM_HDCP_CHECK_PERIOD_MS);
}
static void intel_hdcp_prop_work(struct work_struct *work)
{
struct intel_connector *connector = container_of(work,
struct intel_connector,
hdcp_prop_work);
struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
prop_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
struct drm_device *dev = connector->base.dev;
struct drm_connector_state *state;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
mutex_lock(&connector->hdcp_mutex);
mutex_lock(&hdcp->mutex);
/*
* This worker is only used to flip between ENABLED/DESIRED. Either of
* those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED,
* those to UNDESIRED is handled by core. If value == UNDESIRED,
* we're running just after hdcp has been disabled, so just exit
*/
if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
state = connector->base.state;
state->content_protection = connector->hdcp_value;
state->content_protection = hdcp->value;
}
mutex_unlock(&connector->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
@@ -735,8 +773,9 @@ bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
}
int intel_hdcp_init(struct intel_connector *connector,
const struct intel_hdcp_shim *hdcp_shim)
const struct intel_hdcp_shim *shim)
{
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
ret = drm_connector_attach_content_protection_property(
@@ -744,51 +783,53 @@ int intel_hdcp_init(struct intel_connector *connector,
if (ret)
return ret;
connector->hdcp_shim = hdcp_shim;
mutex_init(&connector->hdcp_mutex);
INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work);
INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work);
hdcp->shim = shim;
mutex_init(&hdcp->mutex);
INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
return 0;
}
int intel_hdcp_enable(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
if (!connector->hdcp_shim)
if (!hdcp->shim)
return -ENOENT;
mutex_lock(&connector->hdcp_mutex);
mutex_lock(&hdcp->mutex);
ret = _intel_hdcp_enable(connector);
if (ret)
goto out;
connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
schedule_work(&connector->hdcp_prop_work);
schedule_delayed_work(&connector->hdcp_check_work,
hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
schedule_work(&hdcp->prop_work);
schedule_delayed_work(&hdcp->check_work,
DRM_HDCP_CHECK_PERIOD_MS);
out:
mutex_unlock(&connector->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
return ret;
}
int intel_hdcp_disable(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
int ret = 0;
if (!connector->hdcp_shim)
if (!hdcp->shim)
return -ENOENT;
mutex_lock(&connector->hdcp_mutex);
mutex_lock(&hdcp->mutex);
if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
ret = _intel_hdcp_disable(connector);
}
mutex_unlock(&connector->hdcp_mutex);
cancel_delayed_work_sync(&connector->hdcp_check_work);
mutex_unlock(&hdcp->mutex);
cancel_delayed_work_sync(&hdcp->check_work);
return ret;
}
@@ -828,17 +869,18 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
/* Implements Part 3 of the HDCP authorization procedure */
int intel_hdcp_check_link(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
int ret = 0;
if (!connector->hdcp_shim)
if (!hdcp->shim)
return -ENOENT;
mutex_lock(&connector->hdcp_mutex);
mutex_lock(&hdcp->mutex);
if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
goto out;
if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
@@ -846,17 +888,15 @@ int intel_hdcp_check_link(struct intel_connector *connector)
connector->base.name, connector->base.base.id,
I915_READ(PORT_HDCP_STATUS(port)));
ret = -ENXIO;
connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&connector->hdcp_prop_work);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
goto out;
}
if (connector->hdcp_shim->check_link(intel_dig_port)) {
if (connector->hdcp_value !=
DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
connector->hdcp_value =
DRM_MODE_CONTENT_PROTECTION_ENABLED;
schedule_work(&connector->hdcp_prop_work);
if (hdcp->shim->check_link(intel_dig_port)) {
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
schedule_work(&hdcp->prop_work);
}
goto out;
}
@@ -867,20 +907,20 @@ int intel_hdcp_check_link(struct intel_connector *connector)
ret = _intel_hdcp_disable(connector);
if (ret) {
DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&connector->hdcp_prop_work);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
goto out;
}
ret = _intel_hdcp_enable(connector);
if (ret) {
DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&connector->hdcp_prop_work);
DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
goto out;
}
out:
mutex_unlock(&connector->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
return ret;
}

Some files were not shown because too many files have changed in this diff.