diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index fe27c5b344e3..7c2650cfb070 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -448,7 +448,7 @@ void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
  * mapping will then trigger a page fault on the next user access, allowing
  * fixup by vm_fault_gtt().
  */
-static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
+void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	intel_wakeref_t wakeref;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
index 862e01b7cb69..7c5ccdf59359 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -24,8 +24,11 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
 			      struct drm_device *dev,
 			      u32 handle, u64 *offset);
 
-void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
 void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+
+void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
+
 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
 
 #endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 0158e49bf9bb..ff72ee2fd9cd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -299,7 +299,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 	i915_gem_object_unlock(obj);
 
 	/* Force the fence to be reacquired for GTT access */
-	i915_gem_object_release_mmap(obj);
+	i915_gem_object_release_mmap_gtt(obj);
 
 	/* Try to preallocate memory required to save swizzling on put-pages */
 	if (i915_gem_object_needs_bit17_swizzle(obj)) {
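
Review note (not part of the patch): only the GGTT (aperture) mmap depends on
the fence registers programmed for the object's tiling mode, so on a tiling
change it suffices to revoke just that mapping; CPU mmaps created through the
mmap_offset path are unaffected. A hedged sketch of the resulting call-site
behaviour, assuming the semantics implied by the patch's own comments:

	/* Sketch only; the helpers are those exported by this patch. */
	i915_gem_object_release_mmap_gtt(obj);	/* zap only the aperture PTEs */
	/*
	 * The next userspace access through the GTT mmap faults into
	 * vm_fault_gtt(), which can then reacquire a fence matching the
	 * new tiling parameters. mmap_offset-based CPU mmaps keep working
	 * throughout, which the old i915_gem_object_release_mmap() call
	 * (revoking every mapping) did not allow.
	 */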