linux-stable/include/drm/drm_exec.h

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
#ifndef __DRM_EXEC_H__
#define __DRM_EXEC_H__
#include <linux/compiler.h>
#include <linux/ww_mutex.h>
#define DRM_EXEC_INTERRUPTIBLE_WAIT BIT(0)
#define DRM_EXEC_IGNORE_DUPLICATES BIT(1)
struct drm_gem_object;
/**
* struct drm_exec - Execution context
*/
struct drm_exec {
/**
* @flags: Flags to control locking behavior
*/
uint32_t flags;
/**
* @ticket: WW ticket used for acquiring locks
*/
struct ww_acquire_ctx ticket;
/**
* @num_objects: number of objects locked
*/
unsigned int num_objects;
/**
* @max_objects: maximum objects in array
*/
unsigned int max_objects;
/**
* @objects: array of the locked objects
*/
struct drm_gem_object **objects;
/**
* @contended: contended GEM object we backed off for
*/
struct drm_gem_object *contended;
/**
* @prelocked: already locked GEM object due to contention
*/
struct drm_gem_object *prelocked;
};
/**
* drm_exec_obj() - Return the object for a given drm_exec index
* @exec: Pointer to the drm_exec context
* @index: The index.
*
* Return: Pointer to the locked object corresponding to @index if
* index is within the number of locked objects. NULL otherwise.
*/
static inline struct drm_gem_object *
drm_exec_obj(struct drm_exec *exec, unsigned long index)
{
return index < exec->num_objects ? exec->objects[index] : NULL;
}
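/*
 * Illustrative sketch only (not part of the original documentation): direct
 * indexed access via drm_exec_obj(), which returns NULL for out-of-range
 * indices.  "exec" is assumed to be a context that already locked objects.
 *
 *	struct drm_gem_object *first = drm_exec_obj(&exec, 0);
 *
 *	if (first)
 *		pr_debug("first locked object is %zu bytes\n", first->size);
 */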
/**
* drm_exec_for_each_locked_object - iterate over all the locked objects
* @exec: drm_exec object
* @index: unsigned long index for the iteration
* @obj: the current GEM object
*
* Iterate over all the locked GEM objects inside the drm_exec object.
*/
#define drm_exec_for_each_locked_object(exec, index, obj) \
for ((index) = 0; ((obj) = drm_exec_obj(exec, index)); ++(index))
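/*
 * Usage sketch, following the pattern documented for drm_exec; "exec" is
 * assumed to be a context that already holds all locks and "fence" a
 * dma_fence set up by the caller:
 *
 *	struct drm_gem_object *obj;
 *	unsigned long index;
 *
 *	drm_exec_for_each_locked_object(&exec, index, obj)
 *		dma_resv_add_fence(obj->resv, fence, DMA_RESV_USAGE_WRITE);
 */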
/**
* drm_exec_for_each_locked_object_reverse - iterate over all the locked
* objects in reverse locking order
* @exec: drm_exec object
* @index: unsigned long index for the iteration
* @obj: the current GEM object
*
* Iterate over all the locked GEM objects inside the drm_exec object in
* reverse locking order. Note that @index is unsigned and wraps around when
* decremented past zero, but that is caught by drm_exec_obj(), which then
* returns a NULL object.
*/
#define drm_exec_for_each_locked_object_reverse(exec, index, obj) \
for ((index) = (exec)->num_objects - 1; \
((obj) = drm_exec_obj(exec, index)); --(index))
/**
* drm_exec_until_all_locked - loop until all GEM objects are locked
* @exec: drm_exec object
*
* Core functionality of the drm_exec object. Loops until all GEM objects are
* locked and no more contention exists. At the beginning of the loop it is
* guaranteed that no GEM object is locked.
*
* Since labels can't be defined local to the loop's body, we use a jump pointer
* to make sure that the retry is only used from within the loop's body.
*/
#define drm_exec_until_all_locked(exec) \
__PASTE(__drm_exec_, __LINE__): \
for (void *__drm_exec_retry_ptr; ({ \
__drm_exec_retry_ptr = &&__PASTE(__drm_exec_, __LINE__);\
(void)__drm_exec_retry_ptr; \
drm_exec_cleanup(exec); \
});)
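/*
 * Typical locking-loop sketch, adapted from the drm_exec overview
 * documentation; "boA" and "boB" are placeholder GEM objects and the error
 * path is abbreviated:
 *
 *	struct drm_exec exec;
 *	int ret;
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
 *	drm_exec_until_all_locked(&exec) {
 *		ret = drm_exec_prepare_obj(&exec, boA, 1);
 *		drm_exec_retry_on_contention(&exec);
 *		if (ret)
 *			goto error;
 *
 *		ret = drm_exec_prepare_obj(&exec, boB, 1);
 *		drm_exec_retry_on_contention(&exec);
 *		if (ret)
 *			goto error;
 *	}
 *
 *	... submit work and add fences here ...
 *
 *	drm_exec_fini(&exec);
 */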
/**
* drm_exec_retry_on_contention - restart the loop to grab all locks
* @exec: drm_exec object
*
* Control flow helper to continue when contention was detected and we need to
* clean up and restart the loop to prepare all GEM objects.
*/
#define drm_exec_retry_on_contention(exec) \
do { \
if (unlikely(drm_exec_is_contended(exec))) \
goto *__drm_exec_retry_ptr; \
} while (0)
/**
* drm_exec_is_contended - check for contention
* @exec: drm_exec object
*
* Returns true if the drm_exec object has run into some contention while
* locking a GEM object and needs to clean up.
*/
static inline bool drm_exec_is_contended(struct drm_exec *exec)
{
return !!exec->contended;
}
void drm_exec_init(struct drm_exec *exec, uint32_t flags);
void drm_exec_fini(struct drm_exec *exec);
bool drm_exec_cleanup(struct drm_exec *exec);
int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
void drm_exec_unlock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
int drm_exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
unsigned int num_fences);
int drm_exec_prepare_array(struct drm_exec *exec,
struct drm_gem_object **objects,
unsigned int num_objects,
unsigned int num_fences);
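/*
 * Sketch of locking a whole array in one call, assuming "objs" and "num_objs"
 * were gathered by the driver beforehand; otherwise the same retry pattern as
 * above applies:
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
 *	drm_exec_until_all_locked(&exec) {
 *		ret = drm_exec_prepare_array(&exec, objs, num_objs, 1);
 *		drm_exec_retry_on_contention(&exec);
 *		if (ret)
 *			break;
 *	}
 *	...
 *	drm_exec_fini(&exec);
 */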
#endif