mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
drm/xe: Deny unbinds if uapi ufence pending
If a user fence was provided for MAP in vm_bind_ioctl and it has still
not been signalled, deny UNMAP of said vma with -EBUSY as long as the
unsignalled fence exists. By intercepting the ufence release signalling,
this guarantees that MAP vs UNMAP sequences won't escape under the radar
if we ever want to track the client's state wrt completed and accessible
MAPs.

v2: find ufence with num_fences > 1 (Matt)
v3: careful on clearing vma ufence (Matt)

Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/1159
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240215181152.450082-3-mika.kuoppala@linux.intel.com
parent 977e5b82e0
commit 158900ade9
2 changed files with 44 additions and 0 deletions
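In short: a MAP that carries a user-fence sync now pins a reference to
that fence on the vma, and a later UNMAP is refused with -EBUSY until
the fence has signalled. A minimal userspace model of that gate follows;
the type and helper names are illustrative stand-ins for the kernel's
xe_user_fence and xe_sync_ufence_*() helpers, not the real API.

/* Compile-able model of the vma->ufence gate added by this patch. */
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>

struct user_fence {
	atomic_int refcount;	/* get/put pairing */
	atomic_bool signalled;	/* set once the fence value lands */
};

struct vma {
	struct user_fence *ufence;	/* pinned by MAP, cleared on UNMAP */
};

/* MAP: take a reference on the provided user fence and pin it. */
static void bind_map(struct vma *vma, struct user_fence *f)
{
	atomic_fetch_add(&f->refcount, 1);
	vma->ufence = f;
}

/* UNMAP: refuse with -EBUSY while the MAP's fence is unsignalled. */
static int bind_unmap(struct vma *vma)
{
	struct user_fence *f = vma->ufence;

	if (f) {
		if (!atomic_load(&f->signalled))
			return -EBUSY;
		vma->ufence = NULL;
		atomic_fetch_sub(&f->refcount, 1);	/* drop the MAP's ref */
	}
	return 0;	/* proceed with the actual unbind */
}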
drivers/gpu/drm/xe/xe_vm.c

@@ -903,6 +903,11 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
 	struct xe_device *xe = vm->xe;
 	bool read_only = xe_vma_read_only(vma);
 
+	if (vma->ufence) {
+		xe_sync_ufence_put(vma->ufence);
+		vma->ufence = NULL;
+	}
+
 	if (xe_vma_is_userptr(vma)) {
 		struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
 
@@ -1622,6 +1627,16 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 
 	trace_xe_vma_unbind(vma);
 
+	if (vma->ufence) {
+		struct xe_user_fence * const f = vma->ufence;
+
+		if (!xe_sync_ufence_get_status(f))
+			return ERR_PTR(-EBUSY);
+
+		vma->ufence = NULL;
+		xe_sync_ufence_put(f);
+	}
+
 	if (number_tiles > 1) {
 		fences = kmalloc_array(number_tiles, sizeof(*fences),
 				       GFP_KERNEL);
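For userspace, this turns a previously racy MAP-then-UNMAP sequence into
an explicit contract: an UNMAP issued while the MAP's user fence is
still unsignalled now fails with -EBUSY and can be retried once the
fence has signalled. A hedged sketch of that retry, assuming a
pre-filled struct drm_xe_vm_bind UNMAP request and an illustrative
wait callback (a real one would use DRM_IOCTL_XE_WAIT_USER_FENCE on
the fence address the MAP was told to signal):

#include <errno.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>	/* kernel uapi: DRM_IOCTL_XE_VM_BIND */

/* Retry an UNMAP that races with an unsignalled MAP ufence; wait_cb is
 * an illustrative stand-in for waiting on the user fence address. */
static int xe_unmap_retry(int fd, struct drm_xe_vm_bind *unmap,
			  void (*wait_cb)(int fd))
{
	for (;;) {
		if (ioctl(fd, DRM_IOCTL_XE_VM_BIND, unmap) == 0)
			return 0;
		if (errno == EINTR)
			continue;	/* interrupted: just retry */
		if (errno != EBUSY)
			return -errno;	/* real failure */
		wait_cb(fd);	/* MAP's ufence not signalled yet: wait */
	}
}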
@@ -1755,6 +1770,21 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 	return ERR_PTR(err);
 }
 
+static struct xe_user_fence *
+find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
+{
+	unsigned int i;
+
+	for (i = 0; i < num_syncs; i++) {
+		struct xe_sync_entry *e = &syncs[i];
+
+		if (xe_sync_is_ufence(e))
+			return xe_sync_ufence_get(e);
+	}
+
+	return NULL;
+}
+
 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
 			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
 			u32 num_syncs, bool immediate, bool first_op,
@@ -1762,9 +1792,16 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
 {
 	struct dma_fence *fence;
 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
+	struct xe_user_fence *ufence;
 
 	xe_vm_assert_held(vm);
 
+	ufence = find_ufence_get(syncs, num_syncs);
+	if (vma->ufence && ufence)
+		xe_sync_ufence_put(vma->ufence);
+
+	vma->ufence = ufence ?: vma->ufence;
+
 	if (immediate) {
 		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
 				       last_op);
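The ufence ?: vma->ufence assignment is the v3 "careful on clearing vma
ufence" point: a rebind whose syncs carry a new user fence replaces (and
puts) the old one, while a rebind carrying no user fence must leave a
still-pending fence in place rather than clear it. The same update
spelled out without the GNU ?: shorthand, reusing the model types from
the sketch above (ufence_put() stands in for xe_sync_ufence_put()):

/* Illustrative stand-in for xe_sync_ufence_put(). */
static void ufence_put(struct user_fence *f)
{
	atomic_fetch_sub(&f->refcount, 1);
}

/* Longhand equivalent of: vma->ufence = ufence ?: vma->ufence; */
static void vma_update_ufence(struct vma *vma, struct user_fence *ufence)
{
	if (ufence) {
		if (vma->ufence)
			ufence_put(vma->ufence);	/* drop superseded ref */
		vma->ufence = ufence;	/* the new ref moves to the vma */
	}
	/* else: keep the existing, possibly pending, ufence untouched */
}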
drivers/gpu/drm/xe/xe_vm_types.h

@@ -19,6 +19,7 @@
 
 struct xe_bo;
 struct xe_sync_entry;
+struct xe_user_fence;
 struct xe_vm;
 
 #define XE_VMA_READ_ONLY	DRM_GPUVA_USERBITS
@@ -105,6 +106,12 @@ struct xe_vma {
 	 * @pat_index: The pat index to use when encoding the PTEs for this vma.
 	 */
 	u16 pat_index;
+
+	/**
+	 * @ufence: The user fence that was provided with MAP.
+	 * Needs to be signalled before UNMAP can be processed.
+	 */
+	struct xe_user_fence *ufence;
 };
 
 /**