diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 2d9617a3472d..f85e6fbf27f4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -743,17 +743,32 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 			   I915_READ(VLV_IIR_RW));
 		seq_printf(m, "Display IMR:\t%08x\n",
 			   I915_READ(VLV_IMR));
-		for_each_pipe(dev_priv, pipe)
+		for_each_pipe(dev_priv, pipe) {
+			enum intel_display_power_domain power_domain;
+
+			power_domain = POWER_DOMAIN_PIPE(pipe);
+			if (!intel_display_power_get_if_enabled(dev_priv,
+								power_domain)) {
+				seq_printf(m, "Pipe %c power disabled\n",
+					   pipe_name(pipe));
+				continue;
+			}
+
 			seq_printf(m, "Pipe %c stat:\t%08x\n",
 				   pipe_name(pipe),
 				   I915_READ(PIPESTAT(pipe)));
 
+			intel_display_power_put(dev_priv, power_domain);
+		}
+
+		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 		seq_printf(m, "Port hotplug:\t%08x\n",
 			   I915_READ(PORT_HOTPLUG_EN));
 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
 			   I915_READ(VLV_DPFLIPSTAT));
 		seq_printf(m, "DPINVGTT:\t%08x\n",
 			   I915_READ(DPINVGTT));
+		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 
 		for (i = 0; i < 4; i++) {
 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
@@ -1396,14 +1411,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 static int ironlake_drpc_info(struct seq_file *m)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
 	u32 rgvmodectl, rstdbyctl;
 	u16 crstandvid;
-	int ret;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 	intel_runtime_pm_get(dev_priv);
 
 	rgvmodectl = I915_READ(MEMMODECTL);
@@ -1411,7 +1421,6 @@ static int ironlake_drpc_info(struct seq_file *m)
 	crstandvid = I915_READ16(CRSTANDVID);
 
 	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
 	seq_printf(m, "Boost freq: %d\n",
@@ -1757,6 +1766,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	bool sr_enabled = false;
 
 	intel_runtime_pm_get(dev_priv);
+	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
 	if (HAS_PCH_SPLIT(dev_priv))
 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
@@ -1770,6 +1780,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
 
+	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 	intel_runtime_pm_put(dev_priv);
 
 	seq_printf(m, "self-refresh: %s\n",
@@ -2091,12 +2102,7 @@ static const char *swizzle_string(unsigned swizzle)
 static int i915_swizzle_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	int ret;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 	intel_runtime_pm_get(dev_priv);
 
 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
@@ -2136,7 +2142,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 		seq_puts(m, "L-shaped memory detected\n");
 
 	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -2542,11 +2547,22 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
 	else {
 		for_each_pipe(dev_priv, pipe) {
+			enum transcoder cpu_transcoder =
+				intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+			enum intel_display_power_domain power_domain;
+
+			power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+			if (!intel_display_power_get_if_enabled(dev_priv,
+								power_domain))
+				continue;
+
 			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
 				VLV_EDP_PSR_CURR_STATE_MASK;
 			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
 			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
 				enabled = true;
+
+			intel_display_power_put(dev_priv, power_domain);
 		}
 	}
 
@@ -3094,6 +3110,8 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
+	intel_runtime_pm_get(dev_priv);
+
 	for_each_engine(engine, dev_priv, id) {
 		struct intel_breadcrumbs *b = &engine->breadcrumbs;
 		struct drm_i915_gem_request *rq;
@@ -3213,6 +3231,8 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 		seq_puts(m, "\n");
 	}
 
+	intel_runtime_pm_put(dev_priv);
+
 	return 0;
 }
 
@@ -4799,13 +4819,9 @@ i915_wedged_set(void *data, u64 val)
 	if (i915_reset_in_progress(&dev_priv->gpu_error))
 		return -EAGAIN;
 
-	intel_runtime_pm_get(dev_priv);
-
 	i915_handle_error(dev_priv, val,
 			  "Manually setting wedged to %llu", val);
 
-	intel_runtime_pm_put(dev_priv);
-
 	return 0;
 }
 
@@ -5040,22 +5056,16 @@ static int
 i915_cache_sharing_get(void *data, u64 *val)
 {
 	struct drm_i915_private *dev_priv = data;
-	struct drm_device *dev = &dev_priv->drm;
 	u32 snpcr;
-	int ret;
 
 	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
 		return -ENODEV;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 	intel_runtime_pm_get(dev_priv);
 
 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 
 	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
 
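The debugfs hunks above all follow the same shape: rather than assuming the display hardware is awake, each pipe or transcoder register read is bracketed by a try-acquire of the matching display power domain, and the read is skipped when the well is down. A minimal sketch of that shape, using only helpers that appear in the patch; report_pipe_stat() is a hypothetical name for illustration, not part of the change:

static void report_pipe_stat(struct drm_i915_private *dev_priv,
			     struct seq_file *m, enum pipe pipe)
{
	enum intel_display_power_domain power_domain = POWER_DOMAIN_PIPE(pipe);

	/* Try-acquire: report rather than power the well up just to read. */
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		seq_printf(m, "Pipe %c power disabled\n", pipe_name(pipe));
		return;
	}

	/* Pipe registers are safe to read while the domain reference is held. */
	seq_printf(m, "Pipe %c stat:\t%08x\n",
		   pipe_name(pipe), I915_READ(PIPESTAT(pipe)));

	intel_display_power_put(dev_priv, power_domain);
}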
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 912d5348e3e7..885d33f341f3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2301,24 +2301,6 @@ static int intel_runtime_suspend(struct device *kdev)
 
 	DRM_DEBUG_KMS("Suspending device\n");
 
-	/*
-	 * We could deadlock here in case another thread holding struct_mutex
-	 * calls RPM suspend concurrently, since the RPM suspend will wait
-	 * first for this RPM suspend to finish. In this case the concurrent
-	 * RPM resume will be followed by its RPM suspend counterpart. Still
-	 * for consistency return -EAGAIN, which will reschedule this suspend.
-	 */
-	if (!mutex_trylock(&dev->struct_mutex)) {
-		DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
-		/*
-		 * Bump the expiration timestamp, otherwise the suspend won't
-		 * be rescheduled.
-		 */
-		pm_runtime_mark_last_busy(kdev);
-
-		return -EAGAIN;
-	}
-
 	disable_rpm_wakeref_asserts(dev_priv);
 
 	/*
@@ -2326,7 +2308,6 @@ static int intel_runtime_suspend(struct device *kdev)
 	 * an RPM reference.
 	 */
 	i915_gem_release_all_mmaps(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
 	intel_guc_suspend(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 33ebf6d2556c..63bf51b117a9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -826,6 +826,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
 	uint64_t offset;
 	int ret;
 
+	intel_runtime_pm_get(to_i915(dev));
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
 	if (!IS_ERR(vma)) {
 		node.start = i915_ggtt_offset(vma);
@@ -926,6 +927,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
 		i915_vma_unpin(vma);
 	}
 out:
+	intel_runtime_pm_put(to_i915(dev));
 	return ret;
 }
 
@@ -1060,12 +1062,9 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	ret = i915_gem_shmem_pread(dev, obj, args, file);
 
 	/* pread for non shmem backed objects */
-	if (ret == -EFAULT || ret == -ENODEV) {
-		intel_runtime_pm_get(to_i915(dev));
+	if (ret == -EFAULT || ret == -ENODEV)
 		ret = i915_gem_gtt_pread(dev, obj, args->size,
 					args->offset, args->data_ptr);
-		intel_runtime_pm_put(to_i915(dev));
-	}
 
 	i915_gem_object_put(obj);
 	mutex_unlock(&dev->struct_mutex);
@@ -1126,6 +1125,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 	if (i915_gem_object_is_tiled(obj))
 		return -EFAULT;
 
+	intel_runtime_pm_get(i915);
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 				       PIN_MAPPABLE | PIN_NONBLOCK);
 	if (!IS_ERR(vma)) {
@@ -1234,6 +1234,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 		i915_vma_unpin(vma);
 	}
 out:
+	intel_runtime_pm_put(i915);
 	return ret;
 }
 
@@ -1466,12 +1467,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * perspective, requiring manual detiling by the client.
 	 */
 	if (!i915_gem_object_has_struct_page(obj) ||
-	    cpu_write_needs_clflush(obj)) {
-		ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
+	    cpu_write_needs_clflush(obj))
 		/* Note that the gtt paths might fail with non-page-backed user
 		 * pointers (e.g. gtt mappings when moving data between
-		 * textures). Fallback to the shmem path in that case. */
-	}
+		 * textures). Fallback to the shmem path in that case.
+		 */
+		ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
 
 	if (ret == -EFAULT || ret == -ENOSPC) {
 		if (obj->phys_handle)
@@ -1840,6 +1841,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 		goto err_unpin;
 
 	/* Mark as being mmapped into userspace for later revocation */
+	assert_rpm_wakelock_held(dev_priv);
 	spin_lock(&dev_priv->mm.userfault_lock);
 	if (list_empty(&obj->userfault_link))
 		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
@@ -1925,8 +1927,13 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 	/* Serialisation between user GTT access and our code depends upon
 	 * revoking the CPU's PTE whilst the mutex is held. The next user
 	 * pagefault then has to wait until we release the mutex.
+	 *
+	 * Note that RPM complicates somewhat by adding an additional
+	 * requirement that operations to the GGTT be made holding the RPM
+	 * wakeref.
 	 */
 	lockdep_assert_held(&i915->drm.struct_mutex);
+	intel_runtime_pm_get(i915);
 
 	spin_lock(&i915->mm.userfault_lock);
 	if (!list_empty(&obj->userfault_link)) {
@@ -1935,7 +1942,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 	}
 	spin_unlock(&i915->mm.userfault_lock);
 	if (!zap)
-		return;
+		goto out;
 
 	drm_vma_node_unmap(&obj->base.vma_node,
 			   obj->base.dev->anon_inode->i_mapping);
@@ -1948,6 +1955,9 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 	 * memory writes before touching registers / GSM.
 	 */
 	wmb();
+
+out:
+	intel_runtime_pm_put(i915);
 }
 
 void
@@ -3476,7 +3486,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_gem_caching *args = data;
 	struct drm_i915_gem_object *obj;
 	enum i915_cache_level level;
@@ -3493,23 +3503,21 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 		 * cacheline, whereas normally such cachelines would get
 		 * invalidated.
 		 */
-		if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
+		if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
 			return -ENODEV;
 
 		level = I915_CACHE_LLC;
 		break;
 	case I915_CACHING_DISPLAY:
-		level = HAS_WT(dev_priv) ? I915_CACHE_WT : I915_CACHE_NONE;
+		level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	intel_runtime_pm_get(dev_priv);
-
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
-		goto rpm_put;
+		return ret;
 
 	obj = i915_gem_object_lookup(file, args->handle);
 	if (!obj) {
@@ -3518,13 +3526,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 	}
 
 	ret = i915_gem_object_set_cache_level(obj, level);
-
 	i915_gem_object_put(obj);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
-rpm_put:
-	intel_runtime_pm_put(dev_priv);
-
 	return ret;
 }
 
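In i915_gem.c the runtime-PM wakeref moves out of the pread/pwrite ioctls and into the GGTT fast paths themselves, so it is held exactly for the span that pins and touches the mappable aperture. A sketch of the resulting bracket, where do_ggtt_io() is a hypothetical stand-in for the bodies of i915_gem_gtt_pread() and i915_gem_gtt_pwrite_fast():

static int do_ggtt_io(struct drm_i915_private *i915,
		      struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int ret = 0;

	/* Hold the wakeref across pinning and aperture access. */
	intel_runtime_pm_get(i915);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* ... read or write through the mappable aperture here ... */

	i915_vma_unpin(vma);
out:
	intel_runtime_pm_put(i915);
	return ret;
}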
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 062fb0ad75da..33036359c170 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2667,6 +2667,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 			 enum i915_cache_level cache_level,
 			 u32 flags)
 {
+	struct drm_i915_private *i915 = to_i915(vma->vm->dev);
 	struct drm_i915_gem_object *obj = vma->obj;
 	u32 pte_flags = 0;
 	int ret;
@@ -2679,8 +2680,10 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	if (obj->gt_ro)
 		pte_flags |= PTE_READ_ONLY;
 
+	intel_runtime_pm_get(i915);
 	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
 				cache_level, pte_flags);
+	intel_runtime_pm_put(i915);
 
 	/*
 	 * Without aliasing PPGTT there's no difference between
@@ -2696,6 +2699,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 				 enum i915_cache_level cache_level,
 				 u32 flags)
 {
+	struct drm_i915_private *i915 = to_i915(vma->vm->dev);
 	u32 pte_flags;
 	int ret;
 
@@ -2710,14 +2714,15 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 
 	if (flags & I915_VMA_GLOBAL_BIND) {
+		intel_runtime_pm_get(i915);
 		vma->vm->insert_entries(vma->vm,
 					vma->pages, vma->node.start,
 					cache_level, pte_flags);
+		intel_runtime_pm_put(i915);
 	}
 
 	if (flags & I915_VMA_LOCAL_BIND) {
-		struct i915_hw_ppgtt *appgtt =
-			to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+		struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
 
 		appgtt->base.insert_entries(&appgtt->base,
 					    vma->pages, vma->node.start,
 					    cache_level, pte_flags);
@@ -2728,12 +2733,16 @@
 
 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
-	struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+	struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+	struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
 	const u64 size = min(vma->size, vma->node.size);
 
-	if (vma->flags & I915_VMA_GLOBAL_BIND)
+	if (vma->flags & I915_VMA_GLOBAL_BIND) {
+		intel_runtime_pm_get(i915);
 		vma->vm->clear_range(vma->vm,
 				     vma->node.start, size);
+		intel_runtime_pm_put(i915);
+	}
 
 	if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
 		appgtt->base.clear_range(&appgtt->base,
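The i915_gem_gtt.c hunks establish the invariant that global GTT PTEs are only written while a wakeref is held, since the GSM is inaccessible when the device is runtime suspended; per-process PPGTT updates need no such bracket. A sketch of that invariant, with update_ggtt_ptes() as a hypothetical stand-in for the insert_entries/clear_range call sites:

static void update_ggtt_ptes(struct i915_vma *vma,
			     enum i915_cache_level cache_level, u32 pte_flags)
{
	struct drm_i915_private *i915 = to_i915(vma->vm->dev);

	/* The device must be awake whenever the GSM is written. */
	intel_runtime_pm_get(i915);
	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
				cache_level, pte_flags);
	intel_runtime_pm_put(i915);
}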
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index c21bc0068d20..71f80d2a487c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -205,8 +205,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	intel_runtime_pm_get(dev_priv);
-
 	mutex_lock(&dev->struct_mutex);
 	if (obj->pin_display || obj->framebuffer_references) {
 		err = -EBUSY;
@@ -302,8 +300,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	i915_gem_object_put(obj);
 	mutex_unlock(&dev->struct_mutex);
 
-	intel_runtime_pm_put(dev_priv);
-
 	return err;
 }
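Taken together, the i915_drv.c and i915_gem.c changes replace the struct_mutex trylock in intel_runtime_suspend() with RPM itself as the barrier against concurrent GTT mmap access: a faulting client can only publish an object on mm.userfault_list while holding a wakeref, and runtime suspend revokes all such mmaps before the device powers down. A sketch of the fault-side half, where mark_mmapped() is a hypothetical extraction of the i915_gem_fault() hunk above:

static void mark_mmapped(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *obj)
{
	/* A wakeref pins the device awake; runtime suspend cannot
	 * complete until it is released, so the revocation in
	 * i915_gem_release_all_mmaps() cannot race with this insert.
	 */
	assert_rpm_wakelock_held(dev_priv);

	spin_lock(&dev_priv->mm.userfault_lock);
	if (list_empty(&obj->userfault_link))
		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
	spin_unlock(&dev_priv->mm.userfault_lock);
}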