drm/i915/gt: Make timeslice duration configurable

Execlists uses a scheduling quantum (a timeslice) to alternate execution
between ready-to-run contexts of equal priority. This ensures that all
users (though only those of equal importance) have the opportunity to
run and prevents livelocks where contexts may have implicit ordering due
to userspace semaphores. However, not all workloads necessarily benefit
from timeslicing, and in the extreme a sysadmin may want to disable or
reduce the timeslicing granularity.

The timeslicing mechanism can be compiled out^W^W disabled (though the
dead code should still be eliminated by DCE!) with

	./scripts/config --set-val DRM_I915_TIMESLICE_DURATION 0
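
For reference, the elision works because IS_ACTIVE() collapses the
Kconfig integer into a compile-time constant. A minimal sketch,
assuming IS_ACTIVE() reduces to a plain non-zero test on the config
value; timeslicing_enabled() is a hypothetical wrapper for
illustration only:

	/* Assumed shape of the IS_ACTIVE() helper in i915_utils.h */
	#define IS_ACTIVE(config) ((config) != 0)

	static inline bool timeslicing_enabled(void)
	{
		/*
		 * CONFIG_DRM_I915_TIMESLICE_DURATION is a plain integer,
		 * so a value of 0 folds this to constant false and the
		 * compiler can dead-code-eliminate the timeslicing paths.
		 */
		return IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION);
	}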

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191029091632.26281-1-chris@chris-wilson.co.uk
commit b79029b2e8 (parent 4ec37538a6)
Chris Wilson, 2019-10-29 09:16:32 +00:00
6 files changed, 69 insertions(+), 18 deletions(-)

drivers/gpu/drm/i915/Kconfig.profile

@@ -59,3 +59,18 @@ config DRM_I915_STOP_TIMEOUT
	  damage as the system is reset in order to recover. The corollary is
	  that the reset itself may take longer and so be more disruptive to
	  interactive or low latency workloads.
+
+config DRM_I915_TIMESLICE_DURATION
+	int "Scheduling quantum for userspace batches (ms, jiffy granularity)"
+	default 1 # milliseconds
+	help
+	  When two user batches of equal priority are executing, we will
+	  alternate execution of each batch to ensure forward progress of
+	  all users. This is necessary in some cases where there may be
+	  an implicit dependency between those batches that requires
+	  concurrent execution in order for them to proceed, e.g. they
+	  interact with each other via userspace semaphores. Each context
+	  is scheduled for execution for the timeslice duration, before
+	  switching to the next context.
+
+	  May be 0 to disable timeslicing.
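
(Aside: the implicit dependency described in the help text looks
roughly like the following hypothetical sketch in C, two equal-priority
batches handshaking through a shared page, i.e. a userspace semaphore.
Whichever batch holds the engine spins forever unless the other also
gets to run, so forward progress requires alternating execution:)

	/* Hypothetical illustration only, not real batch code. */
	void batch_a(volatile int *sem)
	{
		sem[0] = 1;		/* signal B */
		while (!sem[1])		/* busy-wait for B's signal */
			;
	}

	void batch_b(volatile int *sem)
	{
		sem[1] = 1;		/* signal A */
		while (!sem[0])		/* busy-wait for A's signal */
			;
	}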

drivers/gpu/drm/i915/gt/intel_engine.h

@@ -329,10 +329,19 @@ void intel_engine_init_active(struct intel_engine_cs *engine,
 static inline bool
 intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
 {
-	if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
-		return 0;
+	if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+		return false;
 
 	return intel_engine_has_preemption(engine);
 }
 
+static inline bool
+intel_engine_has_timeslices(const struct intel_engine_cs *engine)
+{
+	if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+		return false;
+
+	return intel_engine_has_semaphores(engine);
+}
+
 #endif /* _INTEL_RINGBUFFER_H_ */

drivers/gpu/drm/i915/gt/intel_engine_cs.c

@@ -315,6 +315,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 		CONFIG_DRM_I915_PREEMPT_TIMEOUT;
 	engine->props.stop_timeout_ms =
 		CONFIG_DRM_I915_STOP_TIMEOUT;
+	engine->props.timeslice_duration_ms =
+		CONFIG_DRM_I915_TIMESLICE_DURATION;
 
 	/*
 	 * To be overridden by the backend on setup. However to facilitate

drivers/gpu/drm/i915/gt/intel_engine_types.h

@@ -523,6 +523,7 @@ struct intel_engine_cs {
 		unsigned long heartbeat_interval_ms;
 		unsigned long preempt_timeout_ms;
 		unsigned long stop_timeout_ms;
+		unsigned long timeslice_duration_ms;
 	} props;
 };

drivers/gpu/drm/i915/gt/intel_lrc.c

@@ -1467,7 +1467,7 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
 {
 	int hint;
 
-	if (!intel_engine_has_semaphores(engine))
+	if (!intel_engine_has_timeslices(engine))
 		return false;
 
 	if (list_is_last(&rq->sched.link, &engine->active.requests))
@@ -1488,15 +1488,32 @@ switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
 	return rq_prio(list_next_entry(rq, sched.link));
 }
 
-static bool
-enable_timeslice(const struct intel_engine_execlists *execlists)
+static inline unsigned long
+timeslice(const struct intel_engine_cs *engine)
+{
+	return READ_ONCE(engine->props.timeslice_duration_ms);
+}
+
+static unsigned long
+active_timeslice(const struct intel_engine_cs *engine)
 {
-	const struct i915_request *rq = *execlists->active;
+	const struct i915_request *rq = *engine->execlists.active;
 
 	if (i915_request_completed(rq))
-		return false;
+		return 0;
 
-	return execlists->switch_priority_hint >= effective_prio(rq);
+	if (engine->execlists.switch_priority_hint < effective_prio(rq))
+		return 0;
+
+	return timeslice(engine);
+}
+
+static void set_timeslice(struct intel_engine_cs *engine)
+{
+	if (!intel_engine_has_timeslices(engine))
+		return;
+
+	set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
 }
 
 static void record_preemption(struct intel_engine_execlists *execlists)
@@ -1667,8 +1684,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 */
 		if (!execlists->timer.expires &&
 		    need_timeslice(engine, last))
-			mod_timer(&execlists->timer, jiffies + 1);
+			set_timer_ms(&execlists->timer,
+				     timeslice(engine));
 
 		return;
 	}
@@ -2092,10 +2110,7 @@ static void process_csb(struct intel_engine_cs *engine)
 			       execlists_num_ports(execlists) *
 			       sizeof(*execlists->pending));
 
-		if (enable_timeslice(execlists))
-			mod_timer(&execlists->timer, jiffies + 1);
-		else
-			cancel_timer(&execlists->timer);
+		set_timeslice(engine);
 
 		WRITE_ONCE(execlists->pending[0], NULL);
 	} else {
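
(Aside: set_timeslice() relies on set_timer_ms() treating a zero
duration as "cancel the timer", which is why active_timeslice() returns
0 both for an already-completed request and when the priority hint says
no switch is pending. A sketch of the assumed semantics, with
set_timer_ms_sketch() as a hypothetical stand-in for the real helper in
i915_utils:)

	static void set_timer_ms_sketch(struct timer_list *t,
					unsigned long timeout_ms)
	{
		if (!timeout_ms) {
			cancel_timer(t);	/* 0ms => no timeslice armed */
			return;
		}

		/* Round up so a 1ms slice is never less than one jiffy. */
		mod_timer(t, jiffies + msecs_to_jiffies_timeout(timeout_ms));
	}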

drivers/gpu/drm/i915/gt/selftest_lrc.c

@@ -440,6 +440,8 @@ static int live_timeslice_preempt(void *arg)
 	 * need to preempt the current task and replace it with another
 	 * ready task.
 	 */
+	if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+		return 0;
 
 	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
 	if (IS_ERR(obj))
@@ -514,6 +516,11 @@ static void wait_for_submit(struct intel_engine_cs *engine,
 	} while (!i915_request_is_active(rq));
 }
 
+static long timeslice_threshold(const struct intel_engine_cs *engine)
+{
+	return 2 * msecs_to_jiffies_timeout(timeslice(engine)) + 1;
+}
+
 static int live_timeslice_queue(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -531,6 +538,8 @@ static int live_timeslice_queue(void *arg)
 	 * ELSP[1] is already occupied, so must rely on timeslicing to
 	 * eject ELSP[0] in favour of the queue.)
 	 */
+	if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+		return 0;
 
 	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
 	if (IS_ERR(obj))
@@ -608,8 +617,8 @@ static int live_timeslice_queue(void *arg)
 		err = -EINVAL;
 	}
 
-	/* Timeslice every jiffie, so within 2 we should signal */
-	if (i915_request_wait(rq, 0, 3) < 0) {
+	/* Timeslice every jiffy, so within 2 we should signal */
+	if (i915_request_wait(rq, 0, timeslice_threshold(engine)) < 0) {
 		struct drm_printer p =
 			drm_info_printer(gt->i915->drm.dev);
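
(Aside: a worked example of the new wait bound, assuming HZ=250, the
default 1ms timeslice, and that msecs_to_jiffies_timeout() pads its
conversion by one jiffy:)

	/*
	 * msecs_to_jiffies_timeout(1) = msecs_to_jiffies(1) + 1 = 2 jiffies
	 * timeslice_threshold()       = 2 * 2 + 1              = 5 jiffies
	 *
	 * i.e. the wait now spans two full timeslices plus a jiffy of
	 * slack, instead of the hard-coded 3 jiffies it replaces.
	 */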
@@ -1383,7 +1392,7 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
 	int err;
 
 	/* Preempt cancel non-preemptible spinner in ELSP0 */
-	if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+	if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
 		return 0;
 
 	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
@@ -2030,7 +2039,7 @@ static int live_preempt_timeout(void *arg)
 	 * Check that we force preemption to occur by cancelling the previous
 	 * context if it refuses to yield the GPU.
 	 */
-	if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+	if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
 		return 0;
 
 	if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))