linux-stable/drivers/gpu/drm/xe/xe_sched_job.h
Matthew Brost eb9702ad29 drm/xe: Allow num_batch_buffer / num_binds == 0 in IOCTLs
The idea is that out-syncs can signal that all previous operations on
the bind queue have completed. An example use case is making
vkQueueWaitIdle easy to implement.

All in-syncs are waited on before signaling out-syncs. This is
implemented by forming a composite software fence of the in-syncs and
installing this fence in the out-syncs and in the exec queue's last
fence slot.
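
For illustration only, a minimal sketch of forming such a composite
fence with the kernel's generic dma_fence_array helper
(linux/dma-fence-array.h); the compose_in_syncs() name and the
in_fences/num_in parameters are invented for the example, not taken
from the driver:

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>

/* Combine N in-sync fences into one fence that signals when all do. */
static struct dma_fence *compose_in_syncs(struct dma_fence **in_fences,
					  int num_in)
{
	struct dma_fence_array *array;

	/* No in-syncs: a stub fence that is already signaled suffices. */
	if (!num_in)
		return dma_fence_get_stub();

	/*
	 * dma_fence_array_create() takes ownership of the (kmalloc'd)
	 * in_fences array and of one reference to each fence in it.
	 */
	array = dma_fence_array_create(num_in, in_fences,
				       dma_fence_context_alloc(1), 1,
				       false /* signal when all signal */);
	if (!array)
		return NULL;

	return &array->base;
}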

The last fence must be added as a dependency for jobs on user exec
queues, since the last fence may be a composite software fence
(unordered, from an ioctl with zero batch buffers or binds) rather than
a hardware fence (ordered, the previous job on the queue).
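
A sketch of what that dependency hookup can look like; the real
implementation is xe_sched_job_last_fence_add_dep() in xe_sched_job.c,
and the xe_exec_queue_last_fence_get() helper and job->q backpointer
used below are assumptions for illustration:

/* Illustrative only: make drm_sched wait on the queue's last fence. */
static int last_fence_add_dep_sketch(struct xe_sched_job *job,
				     struct xe_vm *vm)
{
	struct dma_fence *fence;

	/*
	 * The last fence may be a composite software fence rather than
	 * a hardware fence, so ring-level ordering cannot be relied on;
	 * an explicit scheduler dependency is required.
	 */
	fence = xe_exec_queue_last_fence_get(job->q, vm);

	/* drm_sched_job_add_dependency() consumes the fence reference. */
	return drm_sched_job_add_dependency(&job->drm, fence);
}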

Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
2023-12-21 11:46:09 -05:00

/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_SCHED_JOB_H_
#define _XE_SCHED_JOB_H_

#include "xe_sched_job_types.h"

struct xe_vm;

#define XE_SCHED_HANG_LIMIT 1
#define XE_SCHED_JOB_TIMEOUT LONG_MAX

int xe_sched_job_module_init(void);
void xe_sched_job_module_exit(void);

struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
					 u64 *batch_addr);
void xe_sched_job_destroy(struct kref *ref);

/**
 * xe_sched_job_get - get reference to XE schedule job
 * @job: XE schedule job object
 *
 * Increment XE schedule job's reference count
 */
static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job)
{
	kref_get(&job->refcount);
	return job;
}

/**
 * xe_sched_job_put - put reference to XE schedule job
 * @job: XE schedule job object
 *
 * Decrement XE schedule job's reference count, call xe_sched_job_destroy when
 * reference count == 0.
 */
static inline void xe_sched_job_put(struct xe_sched_job *job)
{
	kref_put(&job->refcount, xe_sched_job_destroy);
}
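
/*
 * Usage sketch (illustrative, not part of this header): hold a
 * reference while touching a job from an asynchronous context:
 *
 *	job = xe_sched_job_get(job);
 *	... inspect job->fence, job state, etc. ...
 *	xe_sched_job_put(job);
 */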

void xe_sched_job_set_error(struct xe_sched_job *job, int error);

/* True if an error has been set on the job's fence */
static inline bool xe_sched_job_is_error(struct xe_sched_job *job)
{
	return job->fence->error < 0;
}

bool xe_sched_job_started(struct xe_sched_job *job);
bool xe_sched_job_completed(struct xe_sched_job *job);

void xe_sched_job_arm(struct xe_sched_job *job);
void xe_sched_job_push(struct xe_sched_job *job);

int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm);

/* Convert an embedded drm_sched_job back to its containing xe_sched_job */
static inline struct xe_sched_job *
to_xe_sched_job(struct drm_sched_job *drm)
{
	return container_of(drm, struct xe_sched_job, drm);
}

/* Sequence number of the job's fence */
static inline u32 xe_sched_job_seqno(struct xe_sched_job *job)
{
	return job->fence->seqno;
}

/* Record the flush flags to apply when this (migration) job is emitted */
static inline void
xe_sched_job_add_migrate_flush(struct xe_sched_job *job, u32 flags)
{
	job->migrate_flush_flags = flags;
}

bool xe_sched_job_is_migration(struct xe_exec_queue *q);

#endif