From 42f6e3da974dc8ad81775110c8d06835acdf375e Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst
Date: Tue, 15 Jan 2013 14:53:18 +0100
Subject: [PATCH 1/8] drm/vmwgfx: always use ttm_bo_is_reserved

Signed-off-by: Maarten Lankhorst
Reviewed-by: Thomas Hellstrom
---
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index e01a17b407b2..16556170fb32 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -959,13 +959,13 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 	if (new_backup && new_backup != res->backup) {
 
 		if (res->backup) {
-			BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
+			BUG_ON(!ttm_bo_is_reserved(&res->backup->base));
 			list_del_init(&res->mob_head);
 			vmw_dmabuf_unreference(&res->backup);
 		}
 
 		res->backup = vmw_dmabuf_reference(new_backup);
-		BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
+		BUG_ON(!ttm_bo_is_reserved(&new_backup->base));
 		list_add_tail(&res->mob_head, &new_backup->res_list);
 	}
 	if (new_backup)

From 979ee290ff0a543352243145dc3654af5a856ab8 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst
Date: Tue, 15 Jan 2013 14:54:22 +0100
Subject: [PATCH 2/8] drm/nouveau: increase reservation sequence every retry

This is temporary until the fence framework can be used.

With the lru/reservation atomicity removal it is possible to see your
own old sequence number on a buffer that someone else is now reserving,
which leads to erroneously reporting -EDEADLK. Work around this by
bumping the sequence number on every retry.

Signed-off-by: Maarten Lankhorst
---
 drivers/gpu/drm/nouveau/nouveau_gem.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 8bf695c52f95..9fcfcb2a020c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -321,8 +321,8 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 	int trycnt = 0;
 	int ret, i;
 
-	sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
 retry:
+	sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
 	if (++trycnt > 100000) {
 		NV_ERROR(drm, "%s failed and gave up.\n", __func__);
 		return -EINVAL;
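
The workaround above targets the deadlock-avoidance check inside the reserve
path: when a buffer is found already reserved, the reservation attempt compares
its own sequence number against the bo's val_seq. The following is a minimal
userspace sketch of that check (not part of the series; the struct and helper
names here are hypothetical), showing why retrying with a stale sequence number
misfires once reservation and val_seq are no longer updated atomically:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct model_bo {
	bool reserved;		/* models atomic_xchg(&bo->reserved, 1) */
	bool seq_valid;
	uint32_t val_seq;	/* sequence the current holder reserved with */
};

/* mirrors the checks made when the bo turns out to be already reserved */
static int try_reserve(struct model_bo *bo, uint32_t sequence)
{
	if (!bo->reserved) {
		bo->reserved = true;
		bo->val_seq = sequence;
		bo->seq_valid = true;
		return 0;
	}
	if (bo->seq_valid) {
		if (sequence == bo->val_seq)
			return -EDEADLK; /* "we already reserved this one" */
		if (sequence - bo->val_seq < (1u << 31))
			return -EAGAIN;	 /* holder looks older: back off */
	}
	return -EBUSY;			 /* plain contention: wait */
}

int main(void)
{
	/*
	 * We reserved this bo with sequence 41 on a previous attempt and
	 * backed off; someone else re-reserved it but has not yet written
	 * their own val_seq.  Retrying with the same number trips the
	 * -EDEADLK check against our own stale value; a freshly bumped
	 * number degrades to a recoverable -EAGAIN back-off at worst.
	 * The printed values are negative Linux errno numbers.
	 */
	struct model_bo bo = {
		.reserved = true, .seq_valid = true, .val_seq = 41,
	};

	printf("retry with old seq 41: %d\n", try_reserve(&bo, 41));
	printf("retry with new seq 42: %d\n", try_reserve(&bo, 42));
	return 0;
}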

From 63d0a4195560362e2e00a3ad38fc331d34e1da9b Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst
Date: Tue, 15 Jan 2013 14:56:37 +0100
Subject: [PATCH 3/8] drm/ttm: remove lru_lock around ttm_bo_reserve

There should no longer be any assumption that reserve will always
succeed with the lru lock held, so we can safely stop making the
reserve and lru-list updates atomic. As a bonus, this fixes most of
the lockdep annotations for reservations.

Signed-off-by: Maarten Lankhorst
Reviewed-by: Jerome Glisse
---
 drivers/gpu/drm/ttm/ttm_bo.c           | 54 +++++++++++++++++---------
 drivers/gpu/drm/ttm/ttm_execbuf_util.c |  2 +-
 include/drm/ttm/ttm_bo_driver.h        | 19 ++-------
 3 files changed, 40 insertions(+), 35 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 33d20be87db5..e8e4814b1295 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -213,14 +213,13 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 	return put_count;
 }
 
-int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence)
 {
-	struct ttm_bo_global *glob = bo->glob;
 	int ret;
 
-	while (unlikely(atomic_read(&bo->reserved) != 0)) {
+	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
 		/**
 		 * Deadlock avoidance for multi-bo reserving.
 		 */
@@ -241,26 +240,36 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 		if (no_wait)
 			return -EBUSY;
 
-		spin_unlock(&glob->lru_lock);
 		ret = ttm_bo_wait_unreserved(bo, interruptible);
-		spin_lock(&glob->lru_lock);
 
 		if (unlikely(ret))
 			return ret;
 	}
 
-	atomic_set(&bo->reserved, 1);
 	if (use_sequence) {
+		bool wake_up = false;
 		/**
 		 * Wake up waiters that may need to recheck for deadlock,
 		 * if we decreased the sequence number.
 		 */
 		if (unlikely((bo->val_seq - sequence < (1 << 31))
			     || !bo->seq_valid))
-			wake_up_all(&bo->event_queue);
+			wake_up = true;
+		/*
+		 * In the worst case with memory ordering these values can be
+		 * seen in the wrong order. However since we call wake_up_all
+		 * in that case, this will hopefully not pose a problem,
+		 * and the worst case would only cause someone to accidentally
+		 * hit -EAGAIN in ttm_bo_reserve when they see old value of
+		 * val_seq. However this would only happen if seq_valid was
+		 * written before val_seq was, and just means some slightly
+		 * increased cpu usage
+		 */
 		bo->val_seq = sequence;
 		bo->seq_valid = true;
+		if (wake_up)
+			wake_up_all(&bo->event_queue);
 	} else {
 		bo->seq_valid = false;
 	}
@@ -289,14 +298,14 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 	int put_count = 0;
 	int ret;
 
-	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
-				    sequence);
-	if (likely(ret == 0))
+	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
+				   sequence);
+	if (likely(ret == 0)) {
+		spin_lock(&glob->lru_lock);
 		put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&glob->lru_lock);
-
-	ttm_bo_list_ref_sub(bo, put_count, true);
+		spin_unlock(&glob->lru_lock);
+		ttm_bo_list_ref_sub(bo, put_count, true);
+	}
 
 	return ret;
 }
@@ -510,7 +519,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	int ret;
 
 	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
 	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
@@ -603,7 +612,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 		return ret;
 
 	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
 	/*
 	 * We raced, and lost, someone else holds the reservation now,
@@ -667,7 +676,14 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 			kref_get(&nentry->list_kref);
 		}
 
-		ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+		if (remove_all && ret) {
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_bo_reserve_nolru(entry, false, false,
+						   false, 0);
+			spin_lock(&glob->lru_lock);
+		}
+
 		if (!ret)
 			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
 							     !remove_all);
@@ -815,7 +831,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &man->lru, lru) {
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
@@ -1796,7 +1812,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &glob->swap_lru, swap) {
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index cd9e4523dc56..bd37b5cb8553 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -153,7 +153,7 @@ retry:
 		struct ttm_buffer_object *bo = entry->bo;
 
 retry_this_bo:
-		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
 			break;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e3a43a47d78c..6fff43222e20 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -790,16 +790,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
  * to make room for a buffer already reserved. (Buffers are reserved before
  * they are evicted). The following algorithm prevents such deadlocks from
  * occurring:
- * 1) Buffers are reserved with the lru spinlock held. Upon successful
- * reservation they are removed from the lru list. This stops a reserved buffer
- * from being evicted. However the lru spinlock is released between the time
- * a buffer is selected for eviction and the time it is reserved.
- * Therefore a check is made when a buffer is reserved for eviction, that it
- * is still the first buffer in the lru list, before it is removed from the
- * list. @check_lru == 1 forces this check. If it fails, the function returns
- * -EINVAL, and the caller should then choose a new buffer to evict and repeat
- * the procedure.
- * 2) Processes attempting to reserve multiple buffers other than for eviction,
+ * Processes attempting to reserve multiple buffers other than for eviction,
  * (typically execbuf), should first obtain a unique 32-bit
  * validation sequence number,
  * and call this function with @use_sequence == 1 and @sequence == the unique
@@ -832,7 +823,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 
 
 /**
- * ttm_bo_reserve_locked:
+ * ttm_bo_reserve_nolru:
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @interruptible: Sleep interruptible if waiting.
@@ -840,9 +831,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
  * @use_sequence: If @bo is already reserved, Only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
  *
- * Must be called with struct ttm_bo_global::lru_lock held,
- * and will not remove reserved buffers from the lru lists.
- * The function may release the LRU spinlock if it needs to sleep.
+ * Will not remove reserved buffers from the lru lists.
  * Otherwise identical to ttm_bo_reserve.
  *
  * Returns:
@@ -855,7 +844,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
  * -EDEADLK: Bo already reserved using @sequence. This error code will only
  * be returned if @use_sequence is set to true.
  */
-extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 				 bool interruptible,
 				 bool no_wait, bool use_sequence,
 				 uint32_t sequence);
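
With the lru lock decoupled from reservation, two calling conventions remain,
sketched below (not series code; bo, man, glob and ret are assumed to exist in
the surrounding driver code): a sleeping reserve is never issued with lru_lock
held any more, and LRU walks may only use the no_wait form.

/* Sleeping reserve: no spinlock may be held across this call. */
ret = ttm_bo_reserve_nolru(bo, true, false, false, 0);

/*
 * LRU walk: lru_lock is held, so only the no_wait (trylock-like)
 * form is allowed; contended buffers are simply skipped.
 */
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
	if (!ret)
		break;
}
spin_unlock(&glob->lru_lock);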

From 7a1863084c9d90ce4b67d645bf9b0f1612e68f62 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst
Date: Tue, 15 Jan 2013 14:56:48 +0100
Subject: [PATCH 4/8] drm/ttm: cleanup ttm_eu_reserve_buffers handling

With the lru lock no longer required to protect reservations, we can
simply do a blocking ttm_bo_reserve_nolru on -EBUSY and handle all
errors in a single path.

Signed-off-by: Maarten Lankhorst
Reviewed-by: Jerome Glisse
---
 drivers/gpu/drm/ttm/ttm_execbuf_util.c | 53 ++++++++++----------------
 1 file changed, 21 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index bd37b5cb8553..c7d323657798 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -82,22 +82,6 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
 	}
 }
 
-static int ttm_eu_wait_unreserved_locked(struct list_head *list,
-					 struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_global *glob = bo->glob;
-	int ret;
-
-	ttm_eu_del_from_lru_locked(list);
-	spin_unlock(&glob->lru_lock);
-	ret = ttm_bo_wait_unreserved(bo, true);
-	spin_lock(&glob->lru_lock);
-	if (unlikely(ret != 0))
-		ttm_eu_backoff_reservation_locked(list);
-	return ret;
-}
-
-
 void ttm_eu_backoff_reservation(struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
@@ -152,19 +136,23 @@ retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-retry_this_bo:
 		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
 			break;
 		case -EBUSY:
-			ret = ttm_eu_wait_unreserved_locked(list, bo);
-			if (unlikely(ret != 0)) {
-				spin_unlock(&glob->lru_lock);
-				ttm_eu_list_ref_sub(list);
-				return ret;
-			}
-			goto retry_this_bo;
+			ttm_eu_del_from_lru_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_bo_reserve_nolru(bo, true, false,
+						   true, val_seq);
+			spin_lock(&glob->lru_lock);
+			if (!ret)
+				break;
+
+			if (unlikely(ret != -EAGAIN))
+				goto err;
+
+			/* fallthrough */
 		case -EAGAIN:
 			ttm_eu_backoff_reservation_locked(list);
 			spin_unlock(&glob->lru_lock);
@@ -174,18 +162,13 @@ retry_this_bo:
 				return ret;
 			goto retry;
 		default:
-			ttm_eu_backoff_reservation_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ttm_eu_list_ref_sub(list);
-			return ret;
+			goto err;
 		}
 
 		entry->reserved = true;
 		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			ttm_eu_backoff_reservation_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ttm_eu_list_ref_sub(list);
-			return -EBUSY;
+			ret = -EBUSY;
+			goto err;
 		}
 	}
 
@@ -194,6 +177,12 @@ retry_this_bo:
 	ttm_eu_list_ref_sub(list);
 
 	return 0;
+
+err:
+	ttm_eu_backoff_reservation_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ttm_eu_list_ref_sub(list);
+	return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
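
The -EBUSY branch above encodes a rule worth spelling out: never block on a
reservation while holding the lru spinlock. A userspace pthreads analogue of
that pattern (not series code; all names here are hypothetical):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

struct model_bo {
	pthread_mutex_t reservation;	/* stands in for bo->reserved */
};

/* caller holds lru_lock on entry and on return */
static int reserve_under_lru_lock(struct model_bo *bo)
{
	/* fast path: like ttm_bo_reserve_nolru with no_wait == true */
	int ret = pthread_mutex_trylock(&bo->reservation);

	if (ret != EBUSY)
		return ret;	/* 0 on success; other errors passed up */

	/* contended: drop the list lock, block, then retake it */
	pthread_mutex_unlock(&lru_lock);
	ret = pthread_mutex_lock(&bo->reservation);
	pthread_mutex_lock(&lru_lock);
	return ret;
}

int main(void)
{
	struct model_bo bo = { .reservation = PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&lru_lock);
	reserve_under_lru_lock(&bo);	/* uncontended: fast path */
	pthread_mutex_unlock(&lru_lock);
	pthread_mutex_unlock(&bo.reservation);
	return 0;
}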

From 5e45d7dfd74100d622f9cdc70bfd1f9fae1671de Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst
Date: Tue, 15 Jan 2013 14:57:05 +0100
Subject: [PATCH 5/8] drm/ttm: add ttm_bo_reserve_slowpath

Instead of dropping everything, waiting for the bo to be unreserved
and trying again from scratch, a better strategy is to do a blocking
wait for the contended reservation. This maps much better onto a
mutex_lock-like call.

Signed-off-by: Maarten Lankhorst
Reviewed-by: Jerome Glisse
---
 drivers/gpu/drm/ttm/ttm_bo.c    | 47 +++++++++++++++++++++++++++++
 include/drm/ttm/ttm_bo_driver.h | 30 +++++++++++++++++
 2 files changed, 77 insertions(+)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e8e4814b1295..4dd6f9e77a7d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -310,6 +310,53 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 	return ret;
 }
 
+int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+				  bool interruptible, uint32_t sequence)
+{
+	bool wake_up = false;
+	int ret;
+
+	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
+		WARN_ON(bo->seq_valid && sequence == bo->val_seq);
+
+		ret = ttm_bo_wait_unreserved(bo, interruptible);
+
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+		wake_up = true;
+
+	/**
+	 * Wake up waiters that may need to recheck for deadlock,
+	 * if we decreased the sequence number.
+	 */
+	bo->val_seq = sequence;
+	bo->seq_valid = true;
+	if (wake_up)
+		wake_up_all(&bo->event_queue);
+
+	return 0;
+}
+
+int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+			    bool interruptible, uint32_t sequence)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int put_count, ret;
+
+	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+	if (likely(!ret)) {
+		spin_lock(&glob->lru_lock);
+		put_count = ttm_bo_del_from_lru(bo);
+		spin_unlock(&glob->lru_lock);
+		ttm_bo_list_ref_sub(bo, put_count, true);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
+
 void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
 {
 	ttm_bo_add_to_lru(bo);
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 6fff43222e20..5af71af6bf88 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -821,6 +821,36 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence);
 
+/**
+ * ttm_bo_reserve_slowpath_nolru:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @sequence: Set (@bo)->sequence to this value after lock
+ *
+ * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
+ *
+ * Will not remove reserved buffers from the lru lists.
+ * Otherwise identical to ttm_bo_reserve_slowpath.
+ */
+extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+					 bool interruptible,
+					 uint32_t sequence);
+
+
+/**
+ * ttm_bo_reserve_slowpath:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @sequence: Set (@bo)->sequence to this value after lock
+ *
+ * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
+ */
+extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+				   bool interruptible, uint32_t sequence);
 
 /**
  * ttm_bo_reserve_nolru:
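
A caller is expected to combine the fast and slow paths roughly as in the
fragment below (a sketch only; backoff_all_reservations() and
next_global_seqno() are hypothetical driver helpers, not functions added by
this series — the real call sites follow in patches 6 and 7):

ret = ttm_bo_reserve(bo, true, false, true, sequence);
if (ret == -EAGAIN) {
	backoff_all_reservations();	/* we hold no reservations now */
	sequence = next_global_seqno();	/* see the v2 notes in 6/8 and 7/8 */
	ret = ttm_bo_reserve_slowpath(bo, true, sequence);
	if (!ret) {
		/* bo is reserved; go back and pick up the others */
	}
}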

From f2d476a110bc24fde008698ae9018c99e803e25c Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst
Date: Tue, 15 Jan 2013 14:57:10 +0100
Subject: [PATCH 6/8] drm/ttm: use ttm_bo_reserve_slowpath_nolru in ttm_eu_reserve_buffers, v2

This requires re-using the seqno: instead of spinning with a new seqno
on every retry we keep the current one, but still drop all other
reservations we hold. Only when we succeed do we try to take back our
other reservations. Keeping the seqno should slightly increase
fairness.

Changes since v1:
 - Increase val_seq before calling ttm_bo_reserve_slowpath_nolru and
   retrying to take all entries, to prevent a race.

Signed-off-by: Maarten Lankhorst
Reviewed-by: Jerome Glisse
---
 drivers/gpu/drm/ttm/ttm_execbuf_util.c | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c7d323657798..7b90def15674 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -129,13 +129,17 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
-retry:
 	spin_lock(&glob->lru_lock);
 	val_seq = entry->bo->bdev->val_seq++;
 
+retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
+		/* already slowpath reserved? */
+		if (entry->reserved)
+			continue;
+
 		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
@@ -155,11 +159,26 @@ retry:
 			/* fallthrough */
 		case -EAGAIN:
 			ttm_eu_backoff_reservation_locked(list);
+
+			/*
+			 * temporarily increase sequence number every retry,
+			 * to prevent us from seeing our old reservation
+			 * sequence when someone else reserved the buffer,
+			 * but hasn't updated the seq_valid/seqno members yet.
+			 */
+			val_seq = entry->bo->bdev->val_seq++;
+
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_wait_unreserved(bo, true);
+			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
 			if (unlikely(ret != 0))
 				return ret;
+			spin_lock(&glob->lru_lock);
+			entry->reserved = true;
+			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+				ret = -EBUSY;
+				goto err;
+			}
 			goto retry;
 		default:
 			goto err;
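
All of the val_seq tests in this series — the wake-up check in 3/8 and 5/8,
and the stale-sequence reasoning above — come down to one wrapping u32
comparison. A self-contained illustration (not series code):

#include <stdint.h>
#include <stdio.h>

/* "a is as new as or newer than b", robust against u32 wraparound */
static int seq_newer_eq(uint32_t a, uint32_t b)
{
	return a - b < (1u << 31);
}

int main(void)
{
	printf("%d\n", seq_newer_eq(5, 3));		  /* 1 */
	printf("%d\n", seq_newer_eq(3, 5));		  /* 0 */
	printf("%d\n", seq_newer_eq(2, UINT32_MAX - 1));  /* 1: wrapped */
	return 0;
}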

From c354c893dd57aac11f5d96ada7c47a20fe090a6e Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst
Date: Tue, 15 Jan 2013 14:57:20 +0100
Subject: [PATCH 7/8] drm/nouveau: use ttm_bo_reserve_slowpath in validate_init, v2

Same rationale as the corresponding commit in drm/ttm: instead of only
waiting for the buffer to become unreserved, we make sure we actually
own its reservation, then retry to get the rest.

Changes since v1:
 - Increase the seqno before calling ttm_bo_reserve_slowpath.

Signed-off-by: Maarten Lankhorst
Reviewed-by: Jerome Glisse
---
 drivers/gpu/drm/nouveau/nouveau_gem.c | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9fcfcb2a020c..7fa6882c2942 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -320,9 +320,10 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 	uint32_t sequence;
 	int trycnt = 0;
 	int ret, i;
+	struct nouveau_bo *res_bo = NULL;
 
-retry:
 	sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
+retry:
 	if (++trycnt > 100000) {
 		NV_ERROR(drm, "%s failed and gave up.\n", __func__);
 		return -EINVAL;
@@ -340,6 +341,11 @@ retry:
 			return -ENOENT;
 		}
 		nvbo = gem->driver_private;
+		if (nvbo == res_bo) {
+			res_bo = NULL;
+			drm_gem_object_unreference_unlocked(gem);
+			continue;
+		}
 
 		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
 			NV_ERROR(drm, "multiple instances of buffer %d on "
@@ -352,15 +358,19 @@ retry:
 		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
 		if (ret) {
 			validate_fini(op, NULL);
-			if (unlikely(ret == -EAGAIN))
-				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
-			drm_gem_object_unreference_unlocked(gem);
+			if (unlikely(ret == -EAGAIN)) {
+				sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
+				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
+							      sequence);
+				if (!ret)
+					res_bo = nvbo;
+			}
 			if (unlikely(ret)) {
+				drm_gem_object_unreference_unlocked(gem);
 				if (ret != -ERESTARTSYS)
 					NV_ERROR(drm, "fail reserve\n");
 				return ret;
 			}
-			goto retry;
 		}
 
 		b->user_priv = (uint64_t)(unsigned long)nvbo;
@@ -382,6 +392,8 @@ retry:
 			validate_fini(op, NULL);
 			return -EINVAL;
 		}
+		if (nvbo == res_bo)
+			goto retry;
 	}
 
 	return 0;

From cc4c0c4de3c775be22072ec3251f2e581b63d9a0 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst
Date: Tue, 15 Jan 2013 14:57:28 +0100
Subject: [PATCH 8/8] drm/ttm: unexport ttm_bo_wait_unreserved

All legitimate users of this function outside ttm_bo.c are gone; it is
now only an implementation detail.

Signed-off-by: Maarten Lankhorst
Reviewed-by: Jerome Glisse
---
 drivers/gpu/drm/ttm/ttm_bo.c    |  4 ++--
 include/drm/ttm/ttm_bo_driver.h | 12 ------------
 2 files changed, 2 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4dd6f9e77a7d..4df47f72214a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -158,7 +158,8 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 }
 
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
+static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
+				  bool interruptible)
 {
 	if (interruptible) {
 		return wait_event_interruptible(bo->event_queue,
@@ -168,7 +169,6 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 		return 0;
 	}
 }
-EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 5af71af6bf88..0fbd046e7c93 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -898,18 +898,6 @@ extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
  */
 extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
 
-/**
- * ttm_bo_wait_unreserved
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Wait for a struct ttm_buffer_object to become unreserved.
- * This is typically used in the execbuf code to relax cpu-usage when
- * a potential deadlock condition backoff.
- */
-extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
-				  bool interruptible);
-
 /*
  * ttm_bo_util.c
  */
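
Taken together, the series converges on the multi-object reservation pattern
sketched below (backoff_all() and next_global_seqno() are hypothetical
stand-ins for the driver-side bookkeeping); this is essentially the shape the
kernel's later ww_mutex API formalizes:

	u32 seq = next_global_seqno();
retry:
	list_for_each_entry(entry, list, head) {
		if (entry->reserved)	/* kept from an earlier slowpath */
			continue;

		ret = ttm_bo_reserve(entry->bo, true, false, true, seq);
		if (ret == -EAGAIN) {
			/* back off, then block on the contended buffer */
			backoff_all(list);
			seq = next_global_seqno();
			ret = ttm_bo_reserve_slowpath(entry->bo, true, seq);
			if (ret)
				return ret;
			entry->reserved = true;
			goto retry;	/* now take the rest */
		} else if (ret) {
			backoff_all(list);
			return ret;
		}
		entry->reserved = true;
	}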