drm/ttm: use a helper for unlocked moves to the lru tail

The pattern was repeated a few times; just add an inline helper for it.

Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-7-airlied@gmail.com
commit 20784cdf4b (parent 46bca88bbd)
Author: Dave Airlie
Date:   2020-08-04 12:55:39 +10:00

3 changed files with 11 additions and 12 deletions
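In short, each call site's open-coded lock/move/unlock sequence on ttm_bo_glob.lru_lock is replaced by a single call to the new inline helper. A condensed before/after view of the pattern, drawn from the hunks below:

	/* before: repeated at each call site */
	spin_lock(&ttm_bo_glob.lru_lock);
	ttm_bo_move_to_lru_tail(bo, NULL);
	spin_unlock(&ttm_bo_glob.lru_lock);

	/* after: the new inline helper does the locking internally */
	ttm_bo_move_to_lru_tail_unlocked(bo);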

--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c

@@ -1101,9 +1101,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 error:
 	if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
-		spin_lock(&ttm_bo_glob.lru_lock);
-		ttm_bo_move_to_lru_tail(bo, NULL);
-		spin_unlock(&ttm_bo_glob.lru_lock);
+		ttm_bo_move_to_lru_tail_unlocked(bo);
 	}

 	return ret;
@@ -1318,9 +1316,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		return ret;
 	}

-	spin_lock(&ttm_bo_glob.lru_lock);
-	ttm_bo_move_to_lru_tail(bo, NULL);
-	spin_unlock(&ttm_bo_glob.lru_lock);
+	ttm_bo_move_to_lru_tail_unlocked(bo);

 	return ret;
 }

--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c

@@ -306,9 +306,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		}

 		if (bo->moving != moving) {
-			spin_lock(&ttm_bo_glob.lru_lock);
-			ttm_bo_move_to_lru_tail(bo, NULL);
-			spin_unlock(&ttm_bo_glob.lru_lock);
+			ttm_bo_move_to_lru_tail_unlocked(bo);
 		}
 		dma_fence_put(moving);
 	}

--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h

@@ -669,6 +669,13 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 	return 0;
 }

+static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
+{
+	spin_lock(&ttm_bo_glob.lru_lock);
+	ttm_bo_move_to_lru_tail(bo, NULL);
+	spin_unlock(&ttm_bo_glob.lru_lock);
+}
+
 /**
  * ttm_bo_unreserve
  *
@@ -678,9 +685,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
  */
 static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-	spin_lock(&ttm_bo_glob.lru_lock);
-	ttm_bo_move_to_lru_tail(bo, NULL);
-	spin_unlock(&ttm_bo_glob.lru_lock);
+	ttm_bo_move_to_lru_tail_unlocked(bo);
 	dma_resv_unlock(bo->base.resv);
 }
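
As a usage note, a hypothetical caller (mydrv_bo_mark_idle() below is made up for this sketch and is not part of the commit) can now bump a buffer object to the LRU tail without touching ttm_bo_glob.lru_lock directly:

/* Hypothetical driver-side sketch: the helper takes and drops the LRU lock itself. */
static void mydrv_bo_mark_idle(struct ttm_buffer_object *bo)
{
	ttm_bo_move_to_lru_tail_unlocked(bo);
}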