drm/i915: Consolidate get_fence with pin_fence

Following the pattern now used for obj->mm.pages, use just pin_fence and
unpin_fence to control access to the fence registers. I.e. instead of
calling get_fence(); pin_fence(), we now just need to call pin_fence().
This will make it easier to reduce the locking requirements around
fence registers.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171009084401.29090-2-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
This commit is contained in:
Chris Wilson 2017-10-09 09:43:56 +01:00
parent b4563f595e
commit 3bd4073524
7 changed files with 45 additions and 30 deletions

View file

@@ -3759,8 +3759,6 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
} }
/* i915_gem_fence_reg.c */ /* i915_gem_fence_reg.c */
int __must_check i915_vma_get_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma);
struct drm_i915_fence_reg * struct drm_i915_fence_reg *
i915_reserve_fence(struct drm_i915_private *dev_priv); i915_reserve_fence(struct drm_i915_private *dev_priv);
void i915_unreserve_fence(struct drm_i915_fence_reg *fence); void i915_unreserve_fence(struct drm_i915_fence_reg *fence);

View file

@@ -1910,7 +1910,7 @@ int i915_gem_fault(struct vm_fault *vmf)
if (ret) if (ret)
goto err_unpin; goto err_unpin;
ret = i915_vma_get_fence(vma); ret = i915_vma_pin_fence(vma);
if (ret) if (ret)
goto err_unpin; goto err_unpin;
@@ -1926,6 +1926,7 @@ int i915_gem_fault(struct vm_fault *vmf)
min_t(u64, vma->size, area->vm_end - area->vm_start), min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->mappable); &ggtt->mappable);
i915_vma_unpin_fence(vma);
err_unpin: err_unpin:
__i915_vma_unpin(vma); __i915_vma_unpin(vma);
err_unlock: err_unlock:

View file

@@ -367,12 +367,12 @@ eb_pin_vma(struct i915_execbuffer *eb,
return false; return false;
if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) { if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
if (unlikely(i915_vma_get_fence(vma))) { if (unlikely(i915_vma_pin_fence(vma))) {
i915_vma_unpin(vma); i915_vma_unpin(vma);
return false; return false;
} }
if (i915_vma_pin_fence(vma)) if (vma->fence)
exec_flags |= __EXEC_OBJECT_HAS_FENCE; exec_flags |= __EXEC_OBJECT_HAS_FENCE;
} }
@@ -385,7 +385,7 @@ static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN)); GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));
if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE)) if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
i915_vma_unpin_fence(vma); __i915_vma_unpin_fence(vma);
__i915_vma_unpin(vma); __i915_vma_unpin(vma);
} }
@@ -563,13 +563,13 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
} }
if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) { if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
err = i915_vma_get_fence(vma); err = i915_vma_pin_fence(vma);
if (unlikely(err)) { if (unlikely(err)) {
i915_vma_unpin(vma); i915_vma_unpin(vma);
return err; return err;
} }
if (i915_vma_pin_fence(vma)) if (vma->fence)
exec_flags |= __EXEC_OBJECT_HAS_FENCE; exec_flags |= __EXEC_OBJECT_HAS_FENCE;
} }

View file

@@ -280,8 +280,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
* *
* 0 on success, negative error code on failure. * 0 on success, negative error code on failure.
*/ */
int int i915_vma_put_fence(struct i915_vma *vma)
i915_vma_put_fence(struct i915_vma *vma)
{ {
struct drm_i915_fence_reg *fence = vma->fence; struct drm_i915_fence_reg *fence = vma->fence;
@@ -299,6 +298,8 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
struct drm_i915_fence_reg *fence; struct drm_i915_fence_reg *fence;
list_for_each_entry(fence, &dev_priv->mm.fence_list, link) { list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
if (fence->pin_count) if (fence->pin_count)
continue; continue;
@@ -313,7 +314,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
} }
/** /**
* i915_vma_get_fence - set up fencing for a vma * i915_vma_pin_fence - set up fencing for a vma
* @vma: vma to map through a fence reg * @vma: vma to map through a fence reg
* *
* When mapping objects through the GTT, userspace wants to be able to write * When mapping objects through the GTT, userspace wants to be able to write
@@ -331,10 +332,11 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
* 0 on success, negative error code on failure. * 0 on success, negative error code on failure.
*/ */
int int
i915_vma_get_fence(struct i915_vma *vma) i915_vma_pin_fence(struct i915_vma *vma)
{ {
struct drm_i915_fence_reg *fence; struct drm_i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL; struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
int err;
/* Note that we revoke fences on runtime suspend. Therefore the user /* Note that we revoke fences on runtime suspend. Therefore the user
* must keep the device awake whilst using the fence. * must keep the device awake whilst using the fence.
@@ -344,6 +346,8 @@ i915_vma_get_fence(struct i915_vma *vma)
/* Just update our place in the LRU if our fence is getting reused. */ /* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) { if (vma->fence) {
fence = vma->fence; fence = vma->fence;
GEM_BUG_ON(fence->vma != vma);
fence->pin_count++;
if (!fence->dirty) { if (!fence->dirty) {
list_move_tail(&fence->link, list_move_tail(&fence->link,
&fence->i915->mm.fence_list); &fence->i915->mm.fence_list);
@@ -353,10 +357,25 @@ i915_vma_get_fence(struct i915_vma *vma)
fence = fence_find(vma->vm->i915); fence = fence_find(vma->vm->i915);
if (IS_ERR(fence)) if (IS_ERR(fence))
return PTR_ERR(fence); return PTR_ERR(fence);
GEM_BUG_ON(fence->pin_count);
fence->pin_count++;
} else } else
return 0; return 0;
return fence_update(fence, set); err = fence_update(fence, set);
if (err)
goto out_unpin;
GEM_BUG_ON(fence->vma != set);
GEM_BUG_ON(vma->fence != (set ? fence : NULL));
if (set)
return 0;
out_unpin:
fence->pin_count--;
return err;
} }
/** /**
@@ -429,6 +448,8 @@ void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
for (i = 0; i < dev_priv->num_fence_regs; i++) { for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i]; struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
if (fence->vma) if (fence->vma)
i915_gem_release_mmap(fence->vma->obj); i915_gem_release_mmap(fence->vma->obj);
} }
@@ -450,6 +471,8 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
struct i915_vma *vma = reg->vma; struct i915_vma *vma = reg->vma;
GEM_BUG_ON(vma && vma->fence != reg);
/* /*
* Commit delayed tiling changes if we have an object still * Commit delayed tiling changes if we have an object still
* attached to the fence, otherwise just clear the fence. * attached to the fence, otherwise just clear the fence.

View file

@@ -309,12 +309,10 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
__i915_vma_pin(vma); __i915_vma_pin(vma);
err = i915_vma_get_fence(vma); err = i915_vma_pin_fence(vma);
if (err) if (err)
goto err_unpin; goto err_unpin;
i915_vma_pin_fence(vma);
return ptr; return ptr;
err_unpin: err_unpin:

View file

@@ -345,15 +345,13 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
* *
* True if the vma has a fence, false otherwise. * True if the vma has a fence, false otherwise.
*/ */
static inline bool int i915_vma_pin_fence(struct i915_vma *vma);
i915_vma_pin_fence(struct i915_vma *vma) int __must_check i915_vma_put_fence(struct i915_vma *vma);
static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{ {
lockdep_assert_held(&vma->obj->base.dev->struct_mutex); GEM_BUG_ON(vma->fence->pin_count <= 0);
if (vma->fence) { vma->fence->pin_count--;
vma->fence->pin_count++;
return true;
} else
return false;
} }
/** /**
@@ -368,10 +366,8 @@ static inline void
i915_vma_unpin_fence(struct i915_vma *vma) i915_vma_unpin_fence(struct i915_vma *vma)
{ {
lockdep_assert_held(&vma->obj->base.dev->struct_mutex); lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
if (vma->fence) { if (vma->fence)
GEM_BUG_ON(vma->fence->pin_count <= 0); __i915_vma_unpin_fence(vma);
vma->fence->pin_count--;
}
} }
#endif #endif

View file

@@ -2219,7 +2219,6 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
* something and try to run the system in a "less than optimal" * something and try to run the system in a "less than optimal"
* mode that matches the user configuration. * mode that matches the user configuration.
*/ */
if (i915_vma_get_fence(vma) == 0)
i915_vma_pin_fence(vma); i915_vma_pin_fence(vma);
} }