2010-11-25 18:00:26 +00:00
|
|
|
/*
|
2019-05-28 09:29:49 +00:00
|
|
|
* SPDX-License-Identifier: MIT
|
2010-11-25 18:00:26 +00:00
|
|
|
*
|
2019-05-28 09:29:49 +00:00
|
|
|
* Copyright © 2008,2010 Intel Corporation
|
2010-11-25 18:00:26 +00:00
|
|
|
*/
|
|
|
|
|
2019-08-11 08:06:32 +00:00
|
|
|
#include <linux/dma-resv.h>
|
2022-03-03 18:19:31 +00:00
|
|
|
#include <linux/highmem.h>
|
2017-01-27 09:40:08 +00:00
|
|
|
#include <linux/sync_file.h>
|
2016-08-04 15:32:42 +00:00
|
|
|
#include <linux/uaccess.h>
|
|
|
|
|
2017-08-15 14:57:33 +00:00
|
|
|
#include <drm/drm_syncobj.h>
|
2016-08-04 15:32:42 +00:00
|
|
|
|
2019-06-13 08:44:16 +00:00
|
|
|
#include "display/intel_frontbuffer.h"
|
|
|
|
|
2019-05-28 09:29:43 +00:00
|
|
|
#include "gem/i915_gem_ioctls.h"
|
2019-05-28 09:29:49 +00:00
|
|
|
#include "gt/intel_context.h"
|
2020-12-16 13:54:52 +00:00
|
|
|
#include "gt/intel_gpu_commands.h"
|
2019-06-21 07:08:02 +00:00
|
|
|
#include "gt/intel_gt.h"
|
2020-04-30 11:18:12 +00:00
|
|
|
#include "gt/intel_gt_buffer_pool.h"
|
2019-04-25 05:01:43 +00:00
|
|
|
#include "gt/intel_gt_pm.h"
|
2019-10-24 10:03:44 +00:00
|
|
|
#include "gt/intel_ring.h"
|
2019-04-25 05:01:43 +00:00
|
|
|
|
2021-09-24 19:14:45 +00:00
|
|
|
#include "pxp/intel_pxp.h"
|
|
|
|
|
2022-01-07 13:20:44 +00:00
|
|
|
#include "i915_cmd_parser.h"
|
2019-08-06 10:07:30 +00:00
|
|
|
#include "i915_drv.h"
|
2022-02-10 15:45:47 +00:00
|
|
|
#include "i915_file_private.h"
|
2017-02-22 11:40:48 +00:00
|
|
|
#include "i915_gem_clflush.h"
|
2019-05-28 09:29:49 +00:00
|
|
|
#include "i915_gem_context.h"
|
2022-01-07 13:20:45 +00:00
|
|
|
#include "i915_gem_evict.h"
|
2019-08-06 10:07:30 +00:00
|
|
|
#include "i915_gem_ioctls.h"
|
2010-11-25 18:00:26 +00:00
|
|
|
#include "i915_trace.h"
|
2020-08-04 08:59:53 +00:00
|
|
|
#include "i915_user_extensions.h"
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2020-03-03 20:43:44 +00:00
|
|
|
/*
 * eb_vma - per-execobject bookkeeping used for the duration of a single
 * execbuffer submission, pairing the user's execobject entry with the
 * i915_vma it resolved to.
 */
struct eb_vma {
|
|
|
|
	/* The GTT binding looked up/created for this execobject. */
	struct i915_vma *vma;
|
|
|
|
	/* __EXEC_OBJECT_* state flags for this entry (pin/fence/etc). */
	unsigned int flags;
|
|
|
|
|
|
|
|
	/** This vma's place in the execbuf reservation list */
|
|
|
|
	struct drm_i915_gem_exec_object2 *exec;
|
|
|
|
	/* Link in the list of objects still awaiting a reservation. */
	struct list_head bind_link;
|
|
|
|
	/* Link in the list of objects with outstanding relocations. */
	struct list_head reloc_link;
|
|
|
|
|
|
|
|
	/* Node in the handle -> eb_vma lookup hash (keyed by @handle). */
	struct hlist_node node;
|
|
|
|
	/* GEM handle supplied by userspace for this execobject. */
	u32 handle;
|
|
|
|
};
|
|
|
|
|
2020-09-08 05:41:17 +00:00
|
|
|
/*
 * Debug knobs to force a particular relocation path instead of letting
 * the driver choose; selected via DBG_FORCE_RELOC below.
 */
enum {
|
|
|
|
	/* Always relocate via CPU writes to the object's pages. */
	FORCE_CPU_RELOC = 1,
|
|
|
|
	/* Always relocate through a GTT mapping. */
	FORCE_GTT_RELOC,
|
|
|
|
	/* Always relocate using the GPU (batch pool) path. */
	FORCE_GPU_RELOC,
|
|
|
|
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
|
|
|
|
};
|
|
|
|
|
2021-03-23 15:49:59 +00:00
|
|
|
/* __EXEC_OBJECT_NO_RESERVE is BIT(31), defined in i915_vma.h */
|
|
|
|
#define __EXEC_OBJECT_HAS_PIN BIT(30)
|
|
|
|
#define __EXEC_OBJECT_HAS_FENCE BIT(29)
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
#define __EXEC_OBJECT_USERPTR_INIT BIT(28)
|
|
|
|
#define __EXEC_OBJECT_NEEDS_MAP BIT(27)
|
|
|
|
#define __EXEC_OBJECT_NEEDS_BIAS BIT(26)
|
|
|
|
#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 26) /* all of the above + */
|
2020-08-19 14:08:44 +00:00
|
|
|
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
|
|
|
#define __EXEC_HAS_RELOC BIT(31)
|
2020-08-19 14:08:52 +00:00
|
|
|
#define __EXEC_ENGINE_PINNED BIT(30)
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
#define __EXEC_USERPTR_USED BIT(29)
|
|
|
|
#define __EXEC_INTERNAL_FLAGS (~0u << 29)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
#define UPDATE PIN_OFFSET_FIXED
|
drm/i915: Prevent negative relocation deltas from wrapping
This is pure evil. Userspace, I'm looking at you SNA, repacks batch
buffers on the fly after generation as they are being passed to the
kernel for execution. These batches also contain self-referenced
relocations as a single buffer encompasses the state commands, kernels,
vertices and sampler. During generation the buffers are placed at known
offsets within the full batch, and then the relocation deltas (as passed
to the kernel) are tweaked as the batch is repacked into a smaller buffer.
This means that userspace is passing negative relocations deltas, which
subsequently wrap to large values if the batch is at a low address. The
GPU hangs when it then tries to use the large value as a base for its
address offsets, rather than wrapping back to the real value (as one
would hope). As the GPU uses positive offsets from the base, we can
treat the relocation address as the minimum address read by the GPU.
For the upper bound, we trust that userspace will not read beyond the
end of the buffer.
So, how do we fix negative relocations from wrapping? We can either
check that every relocation looks valid when we write it, and then
position each object such that we prevent the offset wraparound, or we
just special-case the self-referential behaviour of SNA and force all
batches to be above 256k. Daniel prefers the latter approach.
This fixes a GPU hang when it tries to use an address (relocation +
offset) greater than the GTT size. The issue would occur quite easily
with full-ppgtt as each fd gets its own VM space, so low offsets would
often be handed out. However, with the rearrangement of the low GTT due
to capturing the BIOS framebuffer, it is already affecting kernels 3.15
onwards. I think only IVB+ is susceptible to this bug, but the workaround
should only kick in rarely, so it seems sensible to always apply it.
v3: Use a bias for batch buffers to prevent small negative delta relocations
from wrapping.
v4 from Daniel:
- s/BIAS/BATCH_OFFSET_BIAS/
- Extract eb_vma_misplaced/i915_vma_misplaced since the conditions
were growing rather cumbersome.
- Add a comment to eb_get_batch explaining why we do this.
- Apply the batch offset bias everywhere but mention that we've only
observed it on gen7 gpus.
- Drop PIN_OFFSET_FIX for now, that slipped in from a feature patch.
v5: Add static to eb_get_batch, spotted by 0-day tester.
Testcase: igt/gem_bad_reloc
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=78533
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> (v3)
Cc: stable@vger.kernel.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2014-05-23 06:48:08 +00:00
|
|
|
|
|
|
|
#define BATCH_OFFSET_BIAS (256*1024)
|
2013-11-26 11:23:15 +00:00
|
|
|
|
2017-06-15 08:14:33 +00:00
|
|
|
#define __I915_EXEC_ILLEGAL_FLAGS \
|
2018-08-03 23:24:43 +00:00
|
|
|
(__I915_EXEC_UNKNOWN_FLAGS | \
|
|
|
|
I915_EXEC_CONSTANTS_MASK | \
|
|
|
|
I915_EXEC_RESOURCE_STREAMER)
|
2016-08-02 21:50:38 +00:00
|
|
|
|
2018-06-21 08:01:50 +00:00
|
|
|
/* Catch emission of unexpected errors for CI! */
|
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
|
|
|
|
#undef EINVAL
|
|
|
|
#define EINVAL ({ \
|
|
|
|
DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
|
|
|
|
22; \
|
|
|
|
})
|
|
|
|
#endif
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/**
|
|
|
|
* DOC: User command execution
|
|
|
|
*
|
|
|
|
* Userspace submits commands to be executed on the GPU as an instruction
|
|
|
|
* stream within a GEM object we call a batchbuffer. These instructions may
|
|
|
|
* refer to other GEM objects containing auxiliary state such as kernels,
|
|
|
|
* samplers, render targets and even secondary batchbuffers. Userspace does
|
|
|
|
* not know where in the GPU memory these objects reside and so before the
|
|
|
|
* batchbuffer is passed to the GPU for execution, those addresses in the
|
|
|
|
* batchbuffer and auxiliary objects are updated. This is known as relocation,
|
|
|
|
* or patching. To try and avoid having to relocate each object on the next
|
|
|
|
* execution, userspace is told the location of those objects in this pass,
|
|
|
|
* but this remains just a hint as the kernel may choose a new location for
|
|
|
|
* any object in the future.
|
|
|
|
*
|
2018-04-06 08:05:57 +00:00
|
|
|
* At the level of talking to the hardware, submitting a batchbuffer for the
|
|
|
|
* GPU to execute is to add content to a buffer from which the HW
|
|
|
|
* command streamer is reading.
|
|
|
|
*
|
|
|
|
* 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
|
|
|
|
* Execlists, this command is not placed on the same buffer as the
|
|
|
|
* remaining items.
|
|
|
|
*
|
|
|
|
* 2. Add a command to invalidate caches to the buffer.
|
|
|
|
*
|
|
|
|
* 3. Add a batchbuffer start command to the buffer; the start command is
|
|
|
|
* essentially a token together with the GPU address of the batchbuffer
|
|
|
|
* to be executed.
|
|
|
|
*
|
|
|
|
* 4. Add a pipeline flush to the buffer.
|
|
|
|
*
|
|
|
|
* 5. Add a memory write command to the buffer to record when the GPU
|
|
|
|
* is done executing the batchbuffer. The memory write writes the
|
|
|
|
* global sequence number of the request, ``i915_request::global_seqno``;
|
|
|
|
* the i915 driver uses the current value in the register to determine
|
|
|
|
* if the GPU has completed the batchbuffer.
|
|
|
|
*
|
|
|
|
* 6. Add a user interrupt command to the buffer. This command instructs
|
|
|
|
* the GPU to issue an interrupt when the command, pipeline flush and
|
|
|
|
* memory write are completed.
|
|
|
|
*
|
|
|
|
* 7. Inform the hardware of the additional commands added to the buffer
|
|
|
|
* (by updating the tail pointer).
|
|
|
|
*
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
* Processing an execbuf ioctl is conceptually split up into a few phases.
|
|
|
|
*
|
|
|
|
* 1. Validation - Ensure all the pointers, handles and flags are valid.
|
|
|
|
* 2. Reservation - Assign GPU address space for every object
|
|
|
|
* 3. Relocation - Update any addresses to point to the final locations
|
|
|
|
* 4. Serialisation - Order the request with respect to its dependencies
|
|
|
|
* 5. Construction - Construct a request to execute the batchbuffer
|
|
|
|
* 6. Submission (at some point in the future execution)
|
|
|
|
*
|
|
|
|
* Reserving resources for the execbuf is the most complicated phase. We
|
|
|
|
* neither want to have to migrate the object in the address space, nor do
|
|
|
|
* we want to have to update any relocations pointing to this object. Ideally,
|
|
|
|
* we want to leave the object where it is and for all the existing relocations
|
|
|
|
* to match. If the object is given a new address, or if userspace thinks the
|
|
|
|
* object is elsewhere, we have to parse all the relocation entries and update
|
|
|
|
* the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
|
|
|
|
* all the target addresses in all of its objects match the value in the
|
|
|
|
* relocation entries and that they all match the presumed offsets given by the
|
|
|
|
* list of execbuffer objects. Using this knowledge, we know that if we haven't
|
|
|
|
* moved any buffers, all the relocation entries are valid and we can skip
|
|
|
|
* the update. (If userspace is wrong, the likely outcome is an impromptu GPU
|
|
|
|
* hang.) The requirements for using I915_EXEC_NO_RELOC are:
|
|
|
|
*
|
|
|
|
* The addresses written in the objects must match the corresponding
|
|
|
|
* reloc.presumed_offset which in turn must match the corresponding
|
|
|
|
* execobject.offset.
|
|
|
|
*
|
|
|
|
* Any render targets written to in the batch must be flagged with
|
|
|
|
* EXEC_OBJECT_WRITE.
|
|
|
|
*
|
|
|
|
* To avoid stalling, execobject.offset should match the current
|
|
|
|
* address of that object within the active context.
|
|
|
|
*
|
|
|
|
* The reservation is done in multiple phases. First we try and keep any
|
|
|
|
* object already bound in its current location - so as long as it meets the
|
|
|
|
* constraints imposed by the new execbuffer. Any object left unbound after the
|
|
|
|
* first pass is then fitted into any available idle space. If an object does
|
|
|
|
* not fit, all objects are removed from the reservation and the process rerun
|
|
|
|
* after sorting the objects into a priority order (more difficult to fit
|
|
|
|
* objects are tried first). Failing that, the entire VM is cleared and we try
|
|
|
|
* to fit the execbuf once last time before concluding that it simply will not
|
|
|
|
* fit.
|
|
|
|
*
|
|
|
|
* A small complication to all of this is that we allow userspace not only to
|
|
|
|
* specify an alignment and a size for the object in the address space, but
|
|
|
|
* we also allow userspace to specify the exact offset. These objects are
|
|
|
|
* simpler to place (the location is known a priori) all we have to do is make
|
|
|
|
* sure the space is available.
|
|
|
|
*
|
|
|
|
* Once all the objects are in place, patching up the buried pointers to point
|
|
|
|
* to the final locations is a fairly simple job of walking over the relocation
|
|
|
|
* entry arrays, looking up the right address and rewriting the value into
|
|
|
|
* the object. Simple! ... The relocation entries are stored in user memory
|
|
|
|
* and so to access them we have to copy them into a local buffer. That copy
|
|
|
|
* has to avoid taking any pagefaults as they may lead back to a GEM object
|
|
|
|
* requiring the struct_mutex (i.e. recursive deadlock). So once again we split
|
|
|
|
* the relocation into multiple passes. First we try to do everything within an
|
|
|
|
* atomic context (avoid the pagefaults) which requires that we never wait. If
|
|
|
|
* we detect that we may wait, or if we need to fault, then we have to fallback
|
|
|
|
* to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
|
|
|
|
* bells yet?) Dropping the mutex means that we lose all the state we have
|
|
|
|
* built up so far for the execbuf and we must reset any global data. However,
|
|
|
|
* we do leave the objects pinned in their final locations - which is a
|
|
|
|
* potential issue for concurrent execbufs. Once we have left the mutex, we can
|
|
|
|
* allocate and copy all the relocation entries into a large array at our
|
|
|
|
* leisure, reacquire the mutex, reclaim all the objects and other state and
|
|
|
|
* then proceed to update any incorrect addresses with the objects.
|
|
|
|
*
|
|
|
|
* As we process the relocation entries, we maintain a record of whether the
|
|
|
|
* object is being written to. Using NORELOC, we expect userspace to provide
|
|
|
|
* this information instead. We also check whether we can skip the relocation
|
|
|
|
* by comparing the expected value inside the relocation entry with the target's
|
|
|
|
* final address. If they differ, we have to map the current object and rewrite
|
|
|
|
* the 4 or 8 byte pointer within.
|
|
|
|
*
|
|
|
|
* Serialising an execbuf is quite simple according to the rules of the GEM
|
|
|
|
* ABI. Execution within each context is ordered by the order of submission.
|
|
|
|
* Writes to any GEM object are in order of submission and are exclusive. Reads
|
|
|
|
* from a GEM object are unordered with respect to other reads, but ordered by
|
|
|
|
* writes. A write submitted after a read cannot occur before the read, and
|
|
|
|
* similarly any read submitted after a write cannot occur before the write.
|
|
|
|
* Writes are ordered between engines such that only one write occurs at any
|
|
|
|
* time (completing any reads beforehand) - using semaphores where available
|
|
|
|
* and CPU serialisation otherwise. Other GEM access obey the same rules, any
|
|
|
|
* write (either via mmaps using set-domain, or via pwrite) must flush all GPU
|
|
|
|
* reads before starting, and any read (either using set-domain or pread) must
|
|
|
|
* flush all GPU writes before starting. (Note we only employ a barrier before,
|
|
|
|
* we currently rely on userspace not concurrently starting a new execution
|
|
|
|
* whilst reading or writing to an object. This may be an advantage or not
|
|
|
|
* depending on how much you trust userspace not to shoot themselves in the
|
|
|
|
* foot.) Serialisation may just result in the request being inserted into
|
|
|
|
* a DAG awaiting its turn, but most simple is to wait on the CPU until
|
|
|
|
* all dependencies are resolved.
|
|
|
|
*
|
|
|
|
* After all of that, is just a matter of closing the request and handing it to
|
|
|
|
* the hardware (well, leaving it in a queue to be executed). However, we also
|
|
|
|
* offer the ability for batchbuffers to be run with elevated privileges so
|
|
|
|
* that they access otherwise hidden registers. (Used to adjust L3 cache etc.)
|
|
|
|
* Before any batch is given extra privileges we first must check that it
|
|
|
|
* contains no nefarious instructions, we check that each instruction is from
|
|
|
|
* our whitelist and all registers are also from an allowed list. We first
|
|
|
|
* copy the user's batchbuffer to a shadow (so that the user doesn't have
|
|
|
|
* access to it, either by the CPU or GPU as we scan it) and then parse each
|
|
|
|
* instruction. If everything is ok, we set a flag telling the hardware to run
|
|
|
|
* the batchbuffer in trusted mode, otherwise the ioctl is rejected.
|
|
|
|
*/
|
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
/*
 * eb_fence - a single in/out fence attached to the execbuf via the
 * syncobj/timeline-fence user extensions.
 */
struct eb_fence {
|
|
|
|
	struct drm_syncobj *syncobj; /* Use with ptr_mask_bits() */
|
|
|
|
	/* Fence to wait on before execution (NULL if signal-only). */
	struct dma_fence *dma_fence;
|
|
|
|
	/* Timeline point value for timeline syncobjs (0 for binary). */
	u64 value;
|
|
|
|
	/* Preallocated chain node used when signalling a timeline point. */
	struct dma_fence_chain *chain_fence;
|
|
|
|
};
|
|
|
|
|
2017-06-15 08:14:33 +00:00
|
|
|
struct i915_execbuffer {
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
struct drm_i915_private *i915; /** i915 backpointer */
|
|
|
|
struct drm_file *file; /** per-file lookup tables and limits */
|
|
|
|
struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
|
|
|
|
struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *vma;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
struct intel_gt *gt; /* gt for the execbuf */
|
2019-04-25 05:01:43 +00:00
|
|
|
struct intel_context *context; /* logical state for the request */
|
|
|
|
struct i915_gem_context *gem_context; /** caller's context */
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
/** our requests to build */
|
|
|
|
struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
|
|
|
|
/** identity of the batch obj/vma */
|
|
|
|
struct eb_vma *batches[MAX_ENGINE_INSTANCE + 1];
|
2019-12-11 23:08:56 +00:00
|
|
|
struct i915_vma *trampoline; /** trampoline used for chaining */
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
/** used for excl fence in dma_resv objects when > 1 BB submitted */
|
|
|
|
struct dma_fence *composite_fence;
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/** actual size of execobj[] as we may extend it for the cmdparser */
|
|
|
|
unsigned int buffer_count;
|
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
/* number of batches in execbuf IOCTL */
|
|
|
|
unsigned int num_batches;
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/** list of vma not yet bound during reservation phase */
|
|
|
|
struct list_head unbound;
|
|
|
|
|
|
|
|
/** list of vma that have execobj.relocation_count */
|
|
|
|
struct list_head relocs;
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
struct i915_gem_ww_ctx ww;
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/**
|
|
|
|
* Track the most recently used object for relocations, as we
|
|
|
|
* frequently have to perform multiple relocations within the same
|
|
|
|
* obj/page
|
|
|
|
*/
|
2017-06-15 08:14:33 +00:00
|
|
|
struct reloc_cache {
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
struct drm_mm_node node; /** temporary GTT binding */
|
2020-09-08 05:41:17 +00:00
|
|
|
unsigned long vaddr; /** Current kmap address */
|
|
|
|
unsigned long page; /** Currently mapped page index */
|
2021-04-13 05:09:59 +00:00
|
|
|
unsigned int graphics_ver; /** Cached value of GRAPHICS_VER */
|
2017-06-15 08:14:33 +00:00
|
|
|
bool use_64bit_reloc : 1;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
bool has_llc : 1;
|
|
|
|
bool has_fence : 1;
|
|
|
|
bool needs_unfenced : 1;
|
2017-06-15 08:14:33 +00:00
|
|
|
} reloc_cache;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
|
|
|
u64 invalid_flags; /** Set of execobj.flags that are invalid */
|
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
/** Length of batch within object */
|
|
|
|
u64 batch_len[MAX_ENGINE_INSTANCE + 1];
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
u32 batch_start_offset; /** Location within object of batch */
|
|
|
|
u32 batch_flags; /** Flags composed for emit_bb_start() */
|
2020-08-19 14:08:48 +00:00
|
|
|
struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
	 * Indicate either the size of the hashtable used to resolve
|
|
|
|
* relocation handles, or if negative that we are using a direct
|
|
|
|
* index into the execobj[].
|
|
|
|
*/
|
|
|
|
int lut_size;
|
|
|
|
struct hlist_head *buckets; /** ht for relocation handles */
|
2020-08-04 08:59:53 +00:00
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
struct eb_fence *fences;
|
|
|
|
unsigned long num_fences;
|
2021-11-29 20:22:45 +00:00
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
|
|
|
|
struct i915_capture_list *capture_lists[MAX_ENGINE_INSTANCE + 1];
|
|
|
|
#endif
|
2010-12-08 10:38:14 +00:00
|
|
|
};
|
|
|
|
|
2020-08-19 14:08:47 +00:00
|
|
|
static int eb_parse(struct i915_execbuffer *eb);
|
2021-10-14 17:20:00 +00:00
|
|
|
static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle);
|
2020-08-19 14:08:52 +00:00
|
|
|
static void eb_unpin_engine(struct i915_execbuffer *eb);
|
2021-11-29 20:22:45 +00:00
|
|
|
static void eb_capture_release(struct i915_execbuffer *eb);
|
2020-08-19 14:08:47 +00:00
|
|
|
|
2017-08-26 13:56:20 +00:00
|
|
|
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
|
|
|
|
{
|
2021-10-14 17:20:00 +00:00
|
|
|
return intel_engine_requires_cmd_parser(eb->context->engine) ||
|
|
|
|
(intel_engine_using_cmd_parser(eb->context->engine) &&
|
2018-08-01 16:45:50 +00:00
|
|
|
eb->args->batch_len);
|
2017-08-26 13:56:20 +00:00
|
|
|
}
|
|
|
|
|
2017-06-15 08:14:33 +00:00
|
|
|
static int eb_create(struct i915_execbuffer *eb)
|
2010-12-08 10:38:14 +00:00
|
|
|
{
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
|
|
|
|
unsigned int size = 1 + ilog2(eb->buffer_count);
|
2017-06-16 14:05:16 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/*
|
|
|
|
* Without a 1:1 association between relocation handles and
|
|
|
|
* the execobject[] index, we instead create a hashtable.
|
|
|
|
* We size it dynamically based on available memory, starting
|
|
|
|
* first with 1:1 assocative hash and scaling back until
|
|
|
|
* the allocation succeeds.
|
|
|
|
*
|
|
|
|
* Later on we use a positive lut_size to indicate we are
|
|
|
|
* using this hashtable, and a negative value to indicate a
|
|
|
|
* direct lookup.
|
|
|
|
*/
|
2017-06-16 14:05:16 +00:00
|
|
|
do {
|
2017-09-01 14:57:28 +00:00
|
|
|
gfp_t flags;
|
2017-06-29 15:04:25 +00:00
|
|
|
|
|
|
|
/* While we can still reduce the allocation size, don't
|
|
|
|
* raise a warning and allow the allocation to fail.
|
|
|
|
* On the last pass though, we want to try as hard
|
|
|
|
* as possible to perform the allocation and warn
|
|
|
|
* if it fails.
|
|
|
|
*/
|
2017-09-13 23:28:29 +00:00
|
|
|
flags = GFP_KERNEL;
|
2017-06-29 15:04:25 +00:00
|
|
|
if (size > 1)
|
|
|
|
flags |= __GFP_NORETRY | __GFP_NOWARN;
|
|
|
|
|
2017-06-16 14:05:16 +00:00
|
|
|
eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
|
2017-06-29 15:04:25 +00:00
|
|
|
flags);
|
2017-06-16 14:05:16 +00:00
|
|
|
if (eb->buckets)
|
|
|
|
break;
|
|
|
|
} while (--size);
|
|
|
|
|
2020-08-19 14:08:44 +00:00
|
|
|
if (unlikely(!size))
|
2017-06-29 15:04:25 +00:00
|
|
|
return -ENOMEM;
|
2013-01-08 10:53:17 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb->lut_size = size;
|
2017-06-15 08:14:33 +00:00
|
|
|
} else {
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb->lut_size = -eb->buffer_count;
|
2017-06-15 08:14:33 +00:00
|
|
|
}
|
2013-01-08 10:53:17 +00:00
|
|
|
|
2017-06-15 08:14:33 +00:00
|
|
|
return 0;
|
2010-12-08 10:38:14 +00:00
|
|
|
}
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
static bool
|
|
|
|
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
|
2017-08-16 08:52:06 +00:00
|
|
|
const struct i915_vma *vma,
|
|
|
|
unsigned int flags)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
{
|
|
|
|
if (vma->node.size < entry->pad_to_size)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
|
|
|
|
return true;
|
|
|
|
|
2017-08-16 08:52:06 +00:00
|
|
|
if (flags & EXEC_OBJECT_PINNED &&
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
vma->node.start != entry->offset)
|
|
|
|
return true;
|
|
|
|
|
2017-08-16 08:52:06 +00:00
|
|
|
if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
vma->node.start < BATCH_OFFSET_BIAS)
|
|
|
|
return true;
|
|
|
|
|
2017-08-16 08:52:06 +00:00
|
|
|
if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
|
2020-12-16 09:29:51 +00:00
|
|
|
(vma->node.start + vma->node.size + 4095) >> 32)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return true;
|
|
|
|
|
2017-10-31 10:36:07 +00:00
|
|
|
if (flags & __EXEC_OBJECT_NEEDS_MAP &&
|
|
|
|
!i915_vma_is_map_and_fenceable(vma))
|
|
|
|
return true;
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-04-01 19:41:35 +00:00
|
|
|
static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry,
|
|
|
|
unsigned int exec_flags)
|
|
|
|
{
|
|
|
|
u64 pin_flags = 0;
|
|
|
|
|
|
|
|
if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
|
|
|
|
pin_flags |= PIN_GLOBAL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
|
|
|
|
* limit address to the first 4GBs for unflagged objects.
|
|
|
|
*/
|
|
|
|
if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
|
|
|
|
pin_flags |= PIN_ZONE_4G;
|
|
|
|
|
|
|
|
if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
|
|
|
|
pin_flags |= PIN_MAPPABLE;
|
|
|
|
|
|
|
|
if (exec_flags & EXEC_OBJECT_PINNED)
|
|
|
|
pin_flags |= entry->offset | PIN_OFFSET_FIXED;
|
|
|
|
else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
|
|
|
|
pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
|
|
|
|
|
|
|
|
return pin_flags;
|
|
|
|
}
|
|
|
|
|
2021-03-23 15:49:53 +00:00
|
|
|
/*
 * Try to (re)pin a vma in place for execbuf, preferring its current or
 * user-requested location. On success the vma is marked with
 * __EXEC_OBJECT_HAS_PIN (and __EXEC_OBJECT_HAS_FENCE if a fence was
 * acquired); under PIN_VALIDATE no real pin is taken, protection comes
 * from holding the object lock (eb->ww).
 *
 * Returns 0 on success, -EDEADLK to unwind the ww transaction,
 * -EBADSLT if the vma ended up misplaced (caller must rebind), or
 * another negative errno from the pin attempts.
 */
static inline int
eb_pin_vma(struct i915_execbuffer *eb,
	   const struct drm_i915_gem_exec_object2 *entry,
	   struct eb_vma *ev)
{
	struct i915_vma *vma = ev->vma;
	u64 pin_flags;
	int err;

	/*
	 * Prefer the address the vma already occupies; if it has no node,
	 * fall back to the user's requested offset (masked to the pinnable
	 * address bits).
	 */
	if (vma->node.size)
		pin_flags = vma->node.start;
	else
		pin_flags = entry->offset & PIN_OFFSET_MASK;

	/*
	 * PIN_OFFSET_FIXED: only accept exactly this address on the fast
	 * path. PIN_NOEVICT: do not evict others here; eviction is handled
	 * between reservation passes. PIN_VALIDATE: no short-term pin, the
	 * object lock keeps the binding stable.
	 */
	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED | PIN_VALIDATE;
	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
		pin_flags |= PIN_GLOBAL;

	/* Attempt to reuse the current location if available */
	err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, pin_flags);
	/* -EDEADLK must propagate so the whole ww transaction can back off. */
	if (err == -EDEADLK)
		return err;

	if (unlikely(err)) {
		/* A softpinned object has nowhere else to go. */
		if (entry->flags & EXEC_OBJECT_PINNED)
			return err;

		/* Failing that pick any _free_ space if suitable */
		err = i915_vma_pin_ww(vma, &eb->ww,
				      entry->pad_to_size,
				      entry->alignment,
				      eb_pin_flags(entry, ev->flags) |
				      PIN_USER | PIN_NOEVICT | PIN_VALIDATE);
		if (unlikely(err))
			return err;
	}

	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_pin_fence(vma);
		if (unlikely(err))
			return err;

		/* vma->fence may be NULL when no fence register is needed. */
		if (vma->fence)
			ev->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	ev->flags |= __EXEC_OBJECT_HAS_PIN;
	/* Even a successful pin may not satisfy all placement constraints. */
	if (eb_vma_misplaced(entry, vma, ev->flags))
		return -EBADSLT;

	return 0;
}
|
|
|
|
|
2020-08-19 14:08:44 +00:00
|
|
|
static inline void
|
|
|
|
eb_unreserve_vma(struct eb_vma *ev)
|
|
|
|
{
|
2020-08-19 14:08:48 +00:00
|
|
|
if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
|
|
|
|
__i915_vma_unpin_fence(ev->vma);
|
|
|
|
|
2020-08-19 14:08:44 +00:00
|
|
|
ev->flags &= ~__EXEC_OBJECT_RESERVED;
|
|
|
|
}
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/*
 * eb_validate_vma - sanity-check one user-supplied execobject entry
 *
 * Rejects malformed entries (invalid flags, non-power-of-two alignment,
 * unaligned/non-canonical pinned offsets, bad pad_to_size) and normalises
 * the entry in place for internal use: the offset is converted to
 * non-canonical form and the fence flags are adjusted to what the
 * platform can actually honour.
 *
 * Returns 0 on success, -EINVAL if the entry cannot be used.
 */
static int
eb_validate_vma(struct i915_execbuffer *eb,
		struct drm_i915_gem_exec_object2 *entry,
		struct i915_vma *vma)
{
	/* Relocations are disallowed for all platforms after TGL-LP. This
	 * also covers all platforms with local memory.
	 */
	if (entry->relocation_count &&
	    GRAPHICS_VER(eb->i915) >= 12 && !IS_TIGERLAKE(eb->i915))
		return -EINVAL;

	/* Reject any flag bits this execbuf call declared unsupported. */
	if (unlikely(entry->flags & eb->invalid_flags))
		return -EINVAL;

	/* A GTT alignment of 0 means "no requirement"; otherwise it must
	 * be a power of two for drm_mm placement.
	 */
	if (unlikely(entry->alignment &&
		     !is_power_of_2_u64(entry->alignment)))
		return -EINVAL;

	/*
	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
	 * any non-page-aligned or non-canonical addresses.
	 */
	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
		     entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
		return -EINVAL;

	/* pad_to_size was once a reserved field, so sanitize it */
	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
		/* Padding must be page-aligned. */
		if (unlikely(offset_in_page(entry->pad_to_size)))
			return -EINVAL;
	} else {
		entry->pad_to_size = 0;
	}

	/*
	 * From drm_mm perspective address space is continuous,
	 * so from this point we're always using non-canonical
	 * form internally.
	 */
	entry->offset = gen8_noncanonical_addr(entry->offset);

	if (!eb->reloc_cache.has_fence) {
		/* No fence registers on this platform: quietly drop the
		 * request rather than failing the execbuf.
		 */
		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
	} else {
		/* Tiled objects that need (or may need, on unfenced hw)
		 * a fence must be mapped through the mappable GTT.
		 */
		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
		     eb->reloc_cache.needs_unfenced) &&
		    i915_gem_object_is_tiled(vma->obj))
			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
	}

	return 0;
}
|
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
static inline bool
|
|
|
|
is_batch_buffer(struct i915_execbuffer *eb, unsigned int buffer_idx)
|
|
|
|
{
|
|
|
|
return eb->args->flags & I915_EXEC_BATCH_FIRST ?
|
|
|
|
buffer_idx < eb->num_batches :
|
|
|
|
buffer_idx >= eb->args->buffer_count - eb->num_batches;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * eb_add_vma - record a looked-up vma in the execbuf state
 *
 * Fills in eb->vma[i] for the execobject at index @i, links it into the
 * handle hash table (when one is in use) and the relocation list (when it
 * has relocations).  If the slot is a batch buffer, additionally applies
 * the batch-specific placement flags, validates the batch bounds and
 * stores it at eb->batches[*current_batch], advancing *current_batch.
 *
 * Returns 0 on success or -EINVAL for an unusable batch buffer.
 */
static int
eb_add_vma(struct i915_execbuffer *eb,
	   unsigned int *current_batch,
	   unsigned int i,
	   struct i915_vma *vma)
{
	struct drm_i915_private *i915 = eb->i915;
	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
	struct eb_vma *ev = &eb->vma[i];

	ev->vma = vma;
	ev->exec = entry;
	ev->flags = entry->flags;

	/* lut_size > 0 means handle lookups go through the hash table. */
	if (eb->lut_size > 0) {
		ev->handle = entry->handle;
		hlist_add_head(&ev->node,
			       &eb->buckets[hash_32(entry->handle,
						    eb->lut_size)]);
	}

	/* Queue for the relocation pass only if there is work to do. */
	if (entry->relocation_count)
		list_add_tail(&ev->reloc_link, &eb->relocs);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if (is_batch_buffer(eb, i)) {
		if (entry->relocation_count &&
		    !(ev->flags & EXEC_OBJECT_PINNED))
			ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
		if (eb->reloc_cache.has_fence)
			ev->flags |= EXEC_OBJECT_NEEDS_FENCE;

		eb->batches[*current_batch] = ev;

		/* The GPU must not overwrite the batch it is executing. */
		if (unlikely(ev->flags & EXEC_OBJECT_WRITE)) {
			drm_dbg(&i915->drm,
				"Attempting to use self-modifying batch buffer\n");
			return -EINVAL;
		}

		/* start_offset + batch_len must fit inside the object. */
		if (range_overflows_t(u64,
				      eb->batch_start_offset,
				      eb->args->batch_len,
				      ev->vma->size)) {
			drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
			return -EINVAL;
		}

		/* batch_len == 0 from userspace means "to the end of the object". */
		if (eb->args->batch_len == 0)
			eb->batch_len[*current_batch] = ev->vma->size -
				eb->batch_start_offset;
		else
			eb->batch_len[*current_batch] = eb->args->batch_len;
		if (unlikely(eb->batch_len[*current_batch] == 0)) { /* impossible! */
			drm_dbg(&i915->drm, "Invalid batch length\n");
			return -EINVAL;
		}

		++*current_batch;
	}

	return 0;
}
|
|
|
|
|
2020-09-08 05:41:17 +00:00
|
|
|
static inline int use_cpu_reloc(const struct reloc_cache *cache,
|
|
|
|
const struct drm_i915_gem_object *obj)
|
|
|
|
{
|
|
|
|
if (!i915_gem_object_has_struct_page(obj))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return (cache->has_llc ||
|
|
|
|
obj->cache_dirty ||
|
|
|
|
obj->cache_level != I915_CACHE_NONE);
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
static int eb_reserve_vma(struct i915_execbuffer *eb,
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *ev,
|
2020-03-03 20:43:43 +00:00
|
|
|
u64 pin_flags)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
{
|
2020-03-03 20:43:44 +00:00
|
|
|
struct drm_i915_gem_exec_object2 *entry = ev->exec;
|
|
|
|
struct i915_vma *vma = ev->vma;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
int err;
|
|
|
|
|
2020-03-03 20:43:45 +00:00
|
|
|
if (drm_mm_node_allocated(&vma->node) &&
|
|
|
|
eb_vma_misplaced(entry, vma, ev->flags)) {
|
|
|
|
err = i915_vma_unbind(vma);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
err = i915_vma_pin_ww(vma, &eb->ww,
|
2017-08-16 08:52:06 +00:00
|
|
|
entry->pad_to_size, entry->alignment,
|
2020-04-01 19:41:35 +00:00
|
|
|
eb_pin_flags(entry, ev->flags) | pin_flags);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we look up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
if (entry->offset != vma->node.start) {
|
|
|
|
entry->offset = vma->node.start | UPDATE;
|
|
|
|
eb->args->flags |= __EXEC_HAS_RELOC;
|
|
|
|
}
|
|
|
|
|
2020-04-01 19:41:35 +00:00
|
|
|
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
|
2017-10-09 08:43:56 +00:00
|
|
|
err = i915_vma_pin_fence(vma);
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
if (unlikely(err))
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return err;
|
|
|
|
|
2017-10-09 08:43:56 +00:00
|
|
|
if (vma->fence)
|
2020-04-01 19:41:35 +00:00
|
|
|
ev->flags |= __EXEC_OBJECT_HAS_FENCE;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
}
|
|
|
|
|
2020-04-01 19:41:35 +00:00
|
|
|
ev->flags |= __EXEC_OBJECT_HAS_PIN;
|
2020-03-03 20:43:44 +00:00
|
|
|
GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
|
2017-07-21 14:50:35 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
/*
 * Release every reservation we are allowed to drop and rebuild
 * eb->unbound as a priority-ordered bind list.
 *
 * When @force is false, objects that are both softpinned
 * (EXEC_OBJECT_PINNED) and already resident (__EXEC_OBJECT_HAS_PIN)
 * are left alone; when @force is true everything is unreserved.
 *
 * Returns true if at least one object was unreserved (i.e. there is
 * work for a subsequent binding pass).
 */
static bool eb_unbind(struct i915_execbuffer *eb, bool force)
{
	const unsigned int nr = eb->buffer_count;
	struct list_head deferred;
	bool any_unpinned = false;
	unsigned int idx;

	/* Resort *all* the objects into priority order */
	INIT_LIST_HEAD(&eb->unbound);
	INIT_LIST_HEAD(&deferred);

	for (idx = 0; idx < nr; idx++) {
		struct eb_vma *ev = &eb->vma[idx];
		const unsigned int flags = ev->flags;

		/* Keep already-resident softpinned objects unless forced. */
		if (!force && (flags & EXEC_OBJECT_PINNED) &&
		    (flags & __EXEC_OBJECT_HAS_PIN))
			continue;

		any_unpinned = true;
		eb_unreserve_vma(ev);

		if (flags & EXEC_OBJECT_PINNED) {
			/* Pinned must have their slot */
			list_add(&ev->bind_link, &eb->unbound);
		} else if (flags & __EXEC_OBJECT_NEEDS_MAP) {
			/* Map require the lowest 256MiB (aperture) */
			list_add_tail(&ev->bind_link, &eb->unbound);
		} else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) {
			/* Prioritise 4GiB region for restricted bo */
			list_add(&ev->bind_link, &deferred);
		} else {
			list_add_tail(&ev->bind_link, &deferred);
		}
	}

	/* Unrestricted objects bind last, after the constrained ones. */
	list_splice_tail(&deferred, &eb->unbound);

	return any_unpinned;
}
|
|
|
|
|
|
|
|
static int eb_reserve(struct i915_execbuffer *eb)
|
|
|
|
{
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *ev;
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
unsigned int pass;
|
2020-03-06 07:16:14 +00:00
|
|
|
int err = 0;
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
bool unpinned;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Attempt to pin all of the buffers into the GTT.
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
* This is done in 2 phases:
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
*
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
* 1. Unbind all objects that do not match the GTT constraints for
|
|
|
|
* the execbuffer (fenceable, mappable, alignment etc).
|
|
|
|
* 2. Bind new objects.
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
*
|
|
|
|
* This avoid unnecessary unbinding of later objects in order to make
|
|
|
|
* room for the earlier objects *unless* we need to defragment.
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
*
|
|
|
|
* Defragmenting is skipped if all objects are pinned at a fixed location.
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
*/
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
for (pass = 0; pass <= 2; pass++) {
|
|
|
|
int pin_flags = PIN_USER | PIN_VALIDATE;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
if (pass == 0)
|
|
|
|
pin_flags |= PIN_NONBLOCK;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
if (pass >= 1)
|
|
|
|
unpinned = eb_unbind(eb, pass == 2);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
if (pass == 2) {
|
|
|
|
err = mutex_lock_interruptible(&eb->context->vm->mutex);
|
|
|
|
if (!err) {
|
|
|
|
err = i915_gem_evict_vm(eb->context->vm, &eb->ww);
|
|
|
|
mutex_unlock(&eb->context->vm->mutex);
|
|
|
|
}
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (err)
|
2020-08-19 14:08:48 +00:00
|
|
|
return err;
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
}
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
list_for_each_entry(ev, &eb->unbound, bind_link) {
|
|
|
|
err = eb_reserve_vma(eb, ev, pin_flags);
|
|
|
|
if (err)
|
|
|
|
break;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
}
|
2020-03-03 20:43:43 +00:00
|
|
|
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
if (err != -ENOSPC)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
2017-06-16 14:05:16 +00:00
|
|
|
}
|
2016-08-04 15:32:31 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
static int eb_select_context(struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
struct i915_gem_context *ctx;
|
|
|
|
|
|
|
|
ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
|
2021-07-08 15:48:27 +00:00
|
|
|
if (unlikely(IS_ERR(ctx)))
|
|
|
|
return PTR_ERR(ctx);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2019-04-25 05:01:43 +00:00
|
|
|
eb->gem_context = ctx;
|
2021-09-02 14:20:53 +00:00
|
|
|
if (i915_gem_context_has_full_ppgtt(ctx))
|
2018-09-01 09:24:51 +00:00
|
|
|
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
static int __eb_add_lut(struct i915_execbuffer *eb,
|
|
|
|
u32 handle, struct i915_vma *vma)
|
2013-01-08 10:53:14 +00:00
|
|
|
{
|
2020-03-23 09:28:41 +00:00
|
|
|
struct i915_gem_context *ctx = eb->gem_context;
|
|
|
|
struct i915_lut_handle *lut;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
int err;
|
2013-01-08 10:53:14 +00:00
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
lut = i915_lut_handle_alloc();
|
|
|
|
if (unlikely(!lut))
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
i915_vma_get(vma);
|
|
|
|
if (!atomic_fetch_inc(&vma->open_count))
|
|
|
|
i915_vma_reopen(vma);
|
|
|
|
lut->handle = handle;
|
|
|
|
lut->ctx = ctx;
|
|
|
|
|
|
|
|
/* Check that the context hasn't been closed in the meantime */
|
|
|
|
err = -EINTR;
|
2020-07-03 00:43:06 +00:00
|
|
|
if (!mutex_lock_interruptible(&ctx->lut_mutex)) {
|
2021-09-02 14:20:50 +00:00
|
|
|
if (likely(!i915_gem_context_is_closed(ctx)))
|
2020-03-23 09:28:41 +00:00
|
|
|
err = radix_tree_insert(&ctx->handles_vma, handle, vma);
|
2020-07-03 00:43:06 +00:00
|
|
|
else
|
|
|
|
err = -ENOENT;
|
2020-03-23 09:28:41 +00:00
|
|
|
if (err == 0) { /* And nor has this handle */
|
|
|
|
struct drm_i915_gem_object *obj = vma->obj;
|
|
|
|
|
2020-07-01 08:44:39 +00:00
|
|
|
spin_lock(&obj->lut_lock);
|
2020-03-23 09:28:41 +00:00
|
|
|
if (idr_find(&eb->file->object_idr, handle) == obj) {
|
|
|
|
list_add(&lut->obj_link, &obj->lut_list);
|
|
|
|
} else {
|
|
|
|
radix_tree_delete(&ctx->handles_vma, handle);
|
|
|
|
err = -ENOENT;
|
|
|
|
}
|
2020-07-01 08:44:39 +00:00
|
|
|
spin_unlock(&obj->lut_lock);
|
2020-03-23 09:28:41 +00:00
|
|
|
}
|
2020-07-03 00:43:06 +00:00
|
|
|
mutex_unlock(&ctx->lut_mutex);
|
2020-03-23 09:28:41 +00:00
|
|
|
}
|
|
|
|
if (unlikely(err))
|
|
|
|
goto err;
|
2020-03-03 20:43:45 +00:00
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
return 0;
|
2017-06-15 08:14:34 +00:00
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
err:
|
2020-04-22 19:05:58 +00:00
|
|
|
i915_vma_close(vma);
|
2020-03-23 09:28:41 +00:00
|
|
|
i915_vma_put(vma);
|
|
|
|
i915_lut_handle_free(lut);
|
|
|
|
return err;
|
|
|
|
}
|
2018-06-10 19:43:09 +00:00
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
{
	struct i915_address_space *vm = eb->context->vm;

	/*
	 * Translate a user object handle into a vma bound to this context's
	 * address space. Fast path: consult the per-context handle->vma
	 * radix tree under RCU. Slow path: look up the GEM object, obtain
	 * its vma for this vm and publish it in the LUT — retrying on
	 * -EEXIST since another thread may race the insertion.
	 */
	do {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		int err;

		/* Fast path: cached handle->vma translation for this ctx */
		rcu_read_lock();
		vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
		if (likely(vma && vma->vm == vm))
			vma = i915_vma_tryget(vma);
		rcu_read_unlock();
		if (likely(vma))
			return vma;

		obj = i915_gem_object_lookup(eb->file, handle);
		if (unlikely(!obj))
			return ERR_PTR(-ENOENT);

		/*
		 * If the user has opted-in for protected-object tracking, make
		 * sure the object encryption can be used.
		 * We only need to do this when the object is first used with
		 * this context, because the context itself will be banned when
		 * the protected objects become invalid.
		 */
		if (i915_gem_context_uses_protected_content(eb->gem_context) &&
		    i915_gem_object_is_protected(obj)) {
			err = intel_pxp_key_check(&vm->gt->pxp, obj, true);
			if (err) {
				i915_gem_object_put(obj);
				return ERR_PTR(err);
			}
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			i915_gem_object_put(obj);
			return vma;
		}

		/* Publish the handle->vma translation for later lookups */
		err = __eb_add_lut(eb, handle, vma);
		if (likely(!err))
			return vma;

		i915_gem_object_put(obj);
		if (err != -EEXIST)
			return ERR_PTR(err);
		/* Lost the race to install the LUT entry: retry the lookup */
	} while (1);
}
|
2017-06-16 14:05:16 +00:00
|
|
|
|
2020-03-23 09:28:41 +00:00
|
|
|
static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
	unsigned int i, current_batch = 0;
	int err = 0;

	/*
	 * Resolve every execobject handle into a vma, validate the user's
	 * placement/flags for it, and record it in eb->vma[]. Userptr
	 * objects additionally have their backing pages prepared here,
	 * before any object locks are taken.
	 */
	INIT_LIST_HEAD(&eb->relocs);

	for (i = 0; i < eb->buffer_count; i++) {
		struct i915_vma *vma;

		vma = eb_lookup_vma(eb, eb->exec[i].handle);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err;
		}

		err = eb_validate_vma(eb, &eb->exec[i], vma);
		if (unlikely(err)) {
			i915_vma_put(vma);
			goto err;
		}

		/*
		 * Records the vma at eb->vma[i]; eb_add_vma() has already
		 * taken ownership of the slot, so on failure below we must
		 * not NULL this entry (hence return, not goto err).
		 */
		err = eb_add_vma(eb, &current_batch, i, vma);
		if (err)
			return err;

		if (i915_gem_object_is_userptr(vma->obj)) {
			err = i915_gem_object_userptr_submit_init(vma->obj);
			if (err) {
				if (i + 1 < eb->buffer_count) {
					/*
					 * Execbuffer code expects last vma entry to be NULL,
					 * since we already initialized this entry,
					 * set the next value to NULL or we mess up
					 * cleanup handling.
					 */
					eb->vma[i + 1].vma = NULL;
				}

				return err;
			}

			eb->vma[i].flags |= __EXEC_OBJECT_USERPTR_INIT;
			eb->args->flags |= __EXEC_USERPTR_USED;
		}
	}

	return 0;

err:
	/* NULL-terminate the array so cleanup stops at the failed slot */
	eb->vma[i].vma = NULL;
	return err;
}
|
|
|
|
|
2021-06-15 11:36:00 +00:00
|
|
|
static int eb_lock_vmas(struct i915_execbuffer *eb)
|
2020-08-19 14:08:48 +00:00
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
for (i = 0; i < eb->buffer_count; i++) {
|
|
|
|
struct eb_vma *ev = &eb->vma[i];
|
|
|
|
struct i915_vma *vma = ev->vma;
|
|
|
|
|
|
|
|
err = i915_gem_object_lock(vma->obj, &eb->ww);
|
|
|
|
if (err)
|
|
|
|
return err;
|
2021-06-15 11:36:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int eb_validate_vmas(struct i915_execbuffer *eb)
{
	unsigned int i;
	int err;

	/*
	 * Lock all objects under the ww context, then try to pin each vma
	 * in its last-known location. Anything that cannot be pinned where
	 * it is goes onto eb->unbound for the eb_reserve() pass at the end.
	 */
	INIT_LIST_HEAD(&eb->unbound);

	err = eb_lock_vmas(eb);
	if (err)
		return err;

	for (i = 0; i < eb->buffer_count; i++) {
		struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
		struct eb_vma *ev = &eb->vma[i];
		struct i915_vma *vma = ev->vma;

		err = eb_pin_vma(eb, entry, ev);
		/* -EDEADLK must propagate so the ww transaction can back off */
		if (err == -EDEADLK)
			return err;

		if (!err) {
			/* Pinned in place: report a moved offset to userspace */
			if (entry->offset != vma->node.start) {
				entry->offset = vma->node.start | UPDATE;
				eb->args->flags |= __EXEC_HAS_RELOC;
			}
		} else {
			/* Could not pin: queue for the reservation pass */
			eb_unreserve_vma(ev);

			list_add_tail(&ev->bind_link, &eb->unbound);
			if (drm_mm_node_allocated(&vma->node)) {
				err = i915_vma_unbind(vma);
				if (err)
					return err;
			}
		}

		/* Reserve enough slots to accommodate composite fences */
		err = dma_resv_reserve_fences(vma->obj->base.resv, eb->num_batches);
		if (err)
			return err;

		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
			   eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
	}

	if (!list_empty(&eb->unbound))
		return eb_reserve(eb);

	return 0;
}
|
|
|
|
|
2020-03-03 20:43:44 +00:00
|
|
|
static struct eb_vma *
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
|
2010-12-08 10:38:14 +00:00
|
|
|
{
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (eb->lut_size < 0) {
|
|
|
|
if (handle >= -eb->lut_size)
|
2013-01-08 10:53:17 +00:00
|
|
|
return NULL;
|
2020-03-03 20:43:44 +00:00
|
|
|
return &eb->vma[handle];
|
2013-01-08 10:53:17 +00:00
|
|
|
} else {
|
|
|
|
struct hlist_head *head;
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *ev;
|
2010-12-08 10:38:14 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
head = &eb->buckets[hash_32(handle, eb->lut_size)];
|
2020-03-03 20:43:44 +00:00
|
|
|
hlist_for_each_entry(ev, head, node) {
|
|
|
|
if (ev->handle == handle)
|
|
|
|
return ev;
|
2013-01-08 10:53:17 +00:00
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
2010-12-08 10:38:14 +00:00
|
|
|
}
|
|
|
|
|
2021-06-10 14:35:25 +00:00
|
|
|
/*
 * Undo the per-vma reservations taken for this execbuf. The vma array is
 * terminated early by a NULL entry when lookup aborted part-way, so stop
 * at the first hole. On the @final pass the lookup reference is dropped
 * as well, and the capture list and engine pin are released.
 */
static void eb_release_vmas(struct i915_execbuffer *eb, bool final)
{
	unsigned int idx;

	for (idx = 0; idx < eb->buffer_count; idx++) {
		struct eb_vma *ev = &eb->vma[idx];

		if (!ev->vma)
			break;

		eb_unreserve_vma(ev);

		if (final)
			i915_vma_put(ev->vma);
	}

	eb_capture_release(eb);
	eb_unpin_engine(eb);
}
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we look up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speeds up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
static void eb_destroy(const struct i915_execbuffer *eb)
|
2015-12-29 17:24:52 +00:00
|
|
|
{
|
2017-06-29 15:04:25 +00:00
|
|
|
if (eb->lut_size > 0)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
kfree(eb->buckets);
|
2015-12-29 17:24:52 +00:00
|
|
|
}
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
static inline u64
|
2016-08-18 16:16:52 +00:00
|
|
|
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
const struct i915_vma *target)
|
2015-12-29 17:24:52 +00:00
|
|
|
{
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return gen8_canonical_addr((int)reloc->delta + target->node.start);
|
2015-12-29 17:24:52 +00:00
|
|
|
}
|
|
|
|
|
2016-08-18 16:16:52 +00:00
|
|
|
static void reloc_cache_init(struct reloc_cache *cache,
|
|
|
|
struct drm_i915_private *i915)
|
2013-08-21 16:10:51 +00:00
|
|
|
{
|
2020-09-08 05:41:17 +00:00
|
|
|
cache->page = -1;
|
|
|
|
cache->vaddr = 0;
|
2016-11-03 08:39:46 +00:00
|
|
|
/* Must be a variable in the struct to allow GCC to unroll. */
|
2021-04-13 05:09:59 +00:00
|
|
|
cache->graphics_ver = GRAPHICS_VER(i915);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
cache->has_llc = HAS_LLC(i915);
|
2016-11-03 08:39:46 +00:00
|
|
|
cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
|
2021-04-13 05:09:59 +00:00
|
|
|
cache->has_fence = cache->graphics_ver < 4;
|
2017-06-16 14:05:24 +00:00
|
|
|
cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
|
2019-10-03 21:00:59 +00:00
|
|
|
cache->node.flags = 0;
|
2016-08-18 16:16:52 +00:00
|
|
|
}
|
2013-08-21 16:10:51 +00:00
|
|
|
|
2020-09-08 05:41:43 +00:00
|
|
|
static inline void *unmask_page(unsigned long p)
|
|
|
|
{
|
|
|
|
return (void *)(uintptr_t)(p & PAGE_MASK);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline unsigned int unmask_flags(unsigned long p)
|
|
|
|
{
|
|
|
|
return p & ~PAGE_MASK;
|
|
|
|
}
|
|
|
|
|
|
|
|
#define KMAP 0x4 /* after CLFLUSH_FLAGS */
|
|
|
|
|
|
|
|
static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *i915 =
|
|
|
|
container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
|
2021-12-19 21:24:57 +00:00
|
|
|
return to_gt(i915)->ggtt;
|
2020-09-08 05:41:43 +00:00
|
|
|
}
|
|
|
|
|
2021-12-21 20:00:50 +00:00
|
|
|
static void reloc_cache_unmap(struct reloc_cache *cache)
|
|
|
|
{
|
|
|
|
void *vaddr;
|
|
|
|
|
|
|
|
if (!cache->vaddr)
|
|
|
|
return;
|
|
|
|
|
|
|
|
vaddr = unmask_page(cache->vaddr);
|
|
|
|
if (cache->vaddr & KMAP)
|
|
|
|
kunmap_atomic(vaddr);
|
|
|
|
else
|
|
|
|
io_mapping_unmap_atomic((void __iomem *)vaddr);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Re-establish the mapping previously dropped by reloc_cache_unmap():
 * kmap the cached page of @obj again for CPU relocations, or re-open the
 * atomic GGTT aperture mapping for GTT relocations. The cached flag bits
 * in cache->vaddr are preserved; only the address portion is refreshed.
 */
static void reloc_cache_remap(struct reloc_cache *cache,
			      struct drm_i915_gem_object *obj)
{
	void *vaddr;

	/* Nothing was mapped, nothing to restore. */
	if (!cache->vaddr)
		return;

	if (cache->vaddr & KMAP) {
		struct page *page = i915_gem_object_get_page(obj, cache->page);

		vaddr = kmap_atomic(page);
		/* Keep the flag bits, replace only the page address. */
		cache->vaddr = unmask_flags(cache->vaddr) |
			(unsigned long)vaddr;
	} else {
		struct i915_ggtt *ggtt = cache_to_ggtt(cache);
		unsigned long offset;

		offset = cache->node.start;
		/*
		 * No dedicated GGTT node means we mapped through the
		 * object's own GGTT binding (see reloc_iomap()), so index
		 * from its base by the cached page number.
		 */
		if (!drm_mm_node_allocated(&cache->node))
			offset += cache->page << PAGE_SHIFT;

		cache->vaddr = (unsigned long)
			io_mapping_map_atomic_wc(&ggtt->iomap, offset);
	}
}
|
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
/*
 * Fully tear down the reloc_cache mapping and release the resources
 * backing it: for KMAP, unmap and finish CPU access to the object; for
 * GTT, flush writes, unmap the aperture and either clear+remove the
 * temporary GGTT node or unpin the vma used for the mapping. Finally
 * mark the cache empty.
 *
 * NOTE(review): @eb is unused in this body.
 */
static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer *eb)
{
	void *vaddr;

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		/* node.mm doubles as the object pointer in KMAP mode. */
		struct drm_i915_gem_object *obj =
			(struct drm_i915_gem_object *)cache->node.mm;
		/* Flush any deferred clflushes before releasing access. */
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_object_finish_access(obj);
	} else {
		struct i915_ggtt *ggtt = cache_to_ggtt(cache);

		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
		io_mapping_unmap_atomic((void __iomem *)vaddr);

		if (drm_mm_node_allocated(&cache->node)) {
			/* Scrub the PTEs of our temporary aperture slot. */
			ggtt->vm.clear_range(&ggtt->vm,
					     cache->node.start,
					     cache->node.size);
			mutex_lock(&ggtt->vm.mutex);
			drm_mm_remove_node(&cache->node);
			mutex_unlock(&ggtt->vm.mutex);
		} else {
			/* We mapped via a pinned vma instead of a node. */
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}

	cache->vaddr = 0;
	cache->page = -1;
}
|
|
|
|
|
|
|
|
/*
 * Map page @pageno of @obj with kmap_atomic() for CPU relocations,
 * replacing any page previously kmapped by the cache. On first use,
 * prepare the object for CPU writes and stash the required clflush
 * flags (plus KMAP) in cache->vaddr.
 *
 * Returns the kernel address of the page, or an ERR_PTR on failure.
 */
static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			unsigned long pageno)
{
	void *vaddr;
	struct page *page;

	if (cache->vaddr) {
		/* Already in KMAP mode: just swap out the old page. */
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int err;

		err = i915_gem_object_prepare_write(obj, &flushes);
		if (err)
			return ERR_PTR(err);

		/* KMAP and the flush flags must fit below PAGE_MASK. */
		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		/* node.mm doubles as the object pointer in KMAP mode. */
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	page = i915_gem_object_get_page(obj, pageno);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	vaddr = kmap_atomic(page);
	/* Preserve the flag bits, install the new page address. */
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = pageno;

	return vaddr;
}
|
|
|
|
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
/*
 * Map @page of the batch object through the mappable GGTT aperture for
 * GTT-based relocations. On first use, either pin the batch into the
 * aperture or, failing that, grab a single-page GGTT node and insert
 * pages into it one at a time.
 *
 * Returns the io mapping address, NULL to request fallback to CPU
 * relocations (reloc_kmap), or an ERR_PTR on error (including -EDEADLK
 * for ww backoff).
 */
static void *reloc_iomap(struct i915_vma *batch,
			 struct i915_execbuffer *eb,
			 unsigned long page)
{
	struct drm_i915_gem_object *obj = batch->obj;
	struct reloc_cache *cache = &eb->reloc_cache;
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		/* Already in GTT mode: flush and drop the previous page. */
		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma = ERR_PTR(-ENODEV);
		int err;

		if (i915_gem_object_is_tiled(obj))
			return ERR_PTR(-EINVAL);

		/* NULL means "use CPU relocations instead". */
		if (use_cpu_reloc(cache, obj))
			return NULL;

		err = i915_gem_object_set_to_gtt_domain(obj, true);
		if (err)
			return ERR_PTR(err);

		/*
		 * i915_gem_object_ggtt_pin_ww may attempt to remove the batch
		 * VMA from the object list because we no longer pin.
		 *
		 * Only attempt to pin the batch buffer to ggtt if the current batch
		 * is not inside ggtt, or the batch buffer is not misplaced.
		 */
		if (!i915_is_ggtt(batch->vm) ||
		    !i915_vma_misplaced(batch, 0, 0, PIN_MAPPABLE)) {
			vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
							  PIN_MAPPABLE |
							  PIN_NONBLOCK /* NOWARN */ |
							  PIN_NOEVICT);
		}

		/* Propagate ww backoff so the caller can retry. */
		if (vma == ERR_PTR(-EDEADLK))
			return vma;

		if (IS_ERR(vma)) {
			/* Pinning failed: fall back to a one-page node. */
			memset(&cache->node, 0, sizeof(cache->node));
			mutex_lock(&ggtt->vm.mutex);
			err = drm_mm_insert_node_in_range
				(&ggtt->vm.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_INSERT_LOW);
			mutex_unlock(&ggtt->vm.mutex);
			if (err) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			cache->node.start = vma->node.start;
			/* node.mm doubles as the vma pointer when pinned. */
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (drm_mm_node_allocated(&cache->node)) {
		/* Point our reserved PTE at the page to relocate. */
		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, page),
				     offset, I915_CACHE_NONE, 0);
	} else {
		/* Object is bound contiguously; index from its base. */
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
							 offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}
|
|
|
|
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
static void *reloc_vaddr(struct i915_vma *vma,
|
2020-08-19 14:08:54 +00:00
|
|
|
struct i915_execbuffer *eb,
|
2020-09-08 05:41:17 +00:00
|
|
|
unsigned long page)
|
|
|
|
{
|
2020-08-19 14:08:54 +00:00
|
|
|
struct reloc_cache *cache = &eb->reloc_cache;
|
2020-09-08 05:41:17 +00:00
|
|
|
void *vaddr;
|
|
|
|
|
|
|
|
if (cache->page == page) {
|
|
|
|
vaddr = unmask_page(cache->vaddr);
|
|
|
|
} else {
|
|
|
|
vaddr = NULL;
|
|
|
|
if ((cache->vaddr & KMAP) == 0)
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
vaddr = reloc_iomap(vma, eb, page);
|
2020-09-08 05:41:17 +00:00
|
|
|
if (!vaddr)
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
vaddr = reloc_kmap(vma->obj, cache, page);
|
2020-09-08 05:41:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return vaddr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Store a single 32-bit relocation value, flushing the containing
 * cacheline before and/or after the write as requested by @flushes.
 */
static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	/* Common case: no flushing needed, just write. */
	if (likely(!(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER)))) {
		*addr = value;
		return;
	}

	if (flushes & CLFLUSH_BEFORE)
		drm_clflush_virt_range(addr, sizeof(*addr));

	*addr = value;

	/*
	 * Writes to the same cacheline are serialised by the CPU
	 * (including clflush). On the write path, we only require
	 * that it hits memory in an orderly fashion and place
	 * mb barriers at the start and end of the relocation phase
	 * to ensure ordering of clflush wrt to the system.
	 */
	if (flushes & CLFLUSH_AFTER)
		drm_clflush_virt_range(addr, sizeof(*addr));
}
|
|
|
|
|
2020-05-04 14:06:29 +00:00
|
|
|
/*
 * Apply one relocation entry: write the target address into @vma's
 * backing pages at reloc->offset. On platforms with 64-bit relocations
 * the value is written as two 32-bit halves (the second pass via the
 * repeat label, possibly crossing a page boundary).
 *
 * Returns the target's offset tagged with UPDATE on success; on failure
 * the negative errno from reloc_vaddr() is propagated, encoded in the
 * u64 return (callers decode it as an error).
 */
static u64
relocate_entry(struct i915_vma *vma,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct i915_execbuffer *eb,
	       const struct i915_vma *target)
{
	u64 target_addr = relocation_target(reloc, target);
	u64 offset = reloc->offset;
	bool wide = eb->reloc_cache.use_64bit_reloc;
	void *vaddr;

repeat:
	vaddr = reloc_vaddr(vma, eb,
			    offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* Relocations must be u32-aligned within the page. */
	GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32)));
	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_addr),
			eb->reloc_cache.vaddr);

	if (wide) {
		/* Second pass: write the upper 32 bits one u32 along. */
		offset += sizeof(u32);
		target_addr >>= 32;
		wide = false;
		goto repeat;
	}

	return target->node.start | UPDATE;
}
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
static u64
|
|
|
|
eb_relocate_entry(struct i915_execbuffer *eb,
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *ev,
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
const struct drm_i915_gem_relocation_entry *reloc)
|
2010-11-25 18:00:26 +00:00
|
|
|
{
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
struct drm_i915_private *i915 = eb->i915;
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *target;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
int err;
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2010-12-08 10:38:14 +00:00
|
|
|
/* we've already hold a reference to all valid objects */
|
2017-06-16 14:05:17 +00:00
|
|
|
target = eb_get_vma(eb, reloc->target_handle);
|
|
|
|
if (unlikely(!target))
|
2010-11-25 18:00:26 +00:00
|
|
|
return -ENOENT;
|
2012-07-31 22:35:01 +00:00
|
|
|
|
2010-11-25 18:00:26 +00:00
|
|
|
/* Validate that the target is in a valid r/w GPU domain */
|
2010-12-08 10:43:06 +00:00
|
|
|
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
drm_dbg(&i915->drm, "reloc with multiple write domains: "
|
2017-06-16 14:05:17 +00:00
|
|
|
"target %d offset %d "
|
2010-11-25 18:00:26 +00:00
|
|
|
"read %08x write %08x",
|
2017-06-16 14:05:17 +00:00
|
|
|
reloc->target_handle,
|
2010-11-25 18:00:26 +00:00
|
|
|
(int) reloc->offset,
|
|
|
|
reloc->read_domains,
|
|
|
|
reloc->write_domain);
|
2013-12-26 21:39:50 +00:00
|
|
|
return -EINVAL;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
2011-12-14 12:57:27 +00:00
|
|
|
if (unlikely((reloc->write_domain | reloc->read_domains)
|
|
|
|
& ~I915_GEM_GPU_DOMAINS)) {
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: "
|
2017-06-16 14:05:17 +00:00
|
|
|
"target %d offset %d "
|
2010-11-25 18:00:26 +00:00
|
|
|
"read %08x write %08x",
|
2017-06-16 14:05:17 +00:00
|
|
|
reloc->target_handle,
|
2010-11-25 18:00:26 +00:00
|
|
|
(int) reloc->offset,
|
|
|
|
reloc->read_domains,
|
|
|
|
reloc->write_domain);
|
2013-12-26 21:39:50 +00:00
|
|
|
return -EINVAL;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (reloc->write_domain) {
|
2020-03-03 20:43:44 +00:00
|
|
|
target->flags |= EXEC_OBJECT_WRITE;
|
2017-06-16 14:05:17 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/*
|
|
|
|
* Sandybridge PPGTT errata: We need a global gtt mapping
|
|
|
|
* for MI and pipe_control writes because the gpu doesn't
|
|
|
|
* properly redirect them through the ppgtt for non_secure
|
|
|
|
* batchbuffers.
|
|
|
|
*/
|
|
|
|
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
|
2021-12-21 20:00:50 +00:00
|
|
|
GRAPHICS_VER(eb->i915) == 6 &&
|
|
|
|
!i915_vma_is_bound(target->vma, I915_VMA_GLOBAL_BIND)) {
|
|
|
|
struct i915_vma *vma = target->vma;
|
|
|
|
|
|
|
|
reloc_cache_unmap(&eb->reloc_cache);
|
|
|
|
mutex_lock(&vma->vm->mutex);
|
2020-03-03 20:43:44 +00:00
|
|
|
err = i915_vma_bind(target->vma,
|
|
|
|
target->vma->obj->cache_level,
|
2022-01-10 17:22:14 +00:00
|
|
|
PIN_GLOBAL, NULL, NULL);
|
2021-12-21 20:00:50 +00:00
|
|
|
mutex_unlock(&vma->vm->mutex);
|
|
|
|
reloc_cache_remap(&eb->reloc_cache, ev->vma->obj);
|
2020-05-25 14:19:57 +00:00
|
|
|
if (err)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return err;
|
|
|
|
}
|
2017-06-16 14:05:17 +00:00
|
|
|
}
|
2010-11-25 18:00:26 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/*
|
|
|
|
* If the relocation already has the right value in it, no
|
2010-11-25 18:00:26 +00:00
|
|
|
* more work needs to be done.
|
|
|
|
*/
|
2020-09-08 05:41:17 +00:00
|
|
|
if (!DBG_FORCE_RELOC &&
|
|
|
|
gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
|
2010-12-08 10:38:14 +00:00
|
|
|
return 0;
|
2010-11-25 18:00:26 +00:00
|
|
|
|
|
|
|
/* Check that the relocation address is valid... */
|
2013-11-03 04:07:11 +00:00
|
|
|
if (unlikely(reloc->offset >
|
2020-03-03 20:43:44 +00:00
|
|
|
ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
drm_dbg(&i915->drm, "Relocation beyond object bounds: "
|
2017-06-16 14:05:17 +00:00
|
|
|
"target %d offset %d size %d.\n",
|
|
|
|
reloc->target_handle,
|
|
|
|
(int)reloc->offset,
|
2020-03-03 20:43:44 +00:00
|
|
|
(int)ev->vma->size);
|
2013-12-26 21:39:50 +00:00
|
|
|
return -EINVAL;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
2010-12-08 10:43:06 +00:00
|
|
|
if (unlikely(reloc->offset & 3)) {
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
drm_dbg(&i915->drm, "Relocation not 4-byte aligned: "
|
2017-06-16 14:05:17 +00:00
|
|
|
"target %d offset %d.\n",
|
|
|
|
reloc->target_handle,
|
|
|
|
(int)reloc->offset);
|
2013-12-26 21:39:50 +00:00
|
|
|
return -EINVAL;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
2017-06-16 14:05:18 +00:00
|
|
|
/*
|
|
|
|
* If we write into the object, we need to force the synchronisation
|
|
|
|
* barrier, either with an asynchronous clflush or if we executed the
|
|
|
|
* patching using the GPU (though that should be serialised by the
|
|
|
|
* timeline). To be completely sure, and since we are required to
|
|
|
|
* do relocations we are already stalling, disable the user's opt
|
2017-08-16 08:52:09 +00:00
|
|
|
* out of our synchronisation.
|
2017-06-16 14:05:18 +00:00
|
|
|
*/
|
2020-03-03 20:43:44 +00:00
|
|
|
ev->flags &= ~EXEC_OBJECT_ASYNC;
|
2017-06-16 14:05:18 +00:00
|
|
|
|
2010-11-25 18:00:26 +00:00
|
|
|
/* and update the user's relocation entry */
|
2020-09-08 05:41:17 +00:00
|
|
|
return relocate_entry(ev->vma, reloc, eb, target->vma);
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
2020-03-03 20:43:44 +00:00
|
|
|
static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
|
2010-11-25 18:00:26 +00:00
|
|
|
{
|
2012-03-24 20:12:53 +00:00
|
|
|
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
|
2020-03-03 20:43:44 +00:00
|
|
|
const struct drm_i915_gem_exec_object2 *entry = ev->exec;
|
2020-04-07 08:59:30 +00:00
|
|
|
struct drm_i915_gem_relocation_entry __user *urelocs =
|
|
|
|
u64_to_user_ptr(entry->relocs_ptr);
|
|
|
|
unsigned long remain = entry->relocation_count;
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2020-04-07 08:59:30 +00:00
|
|
|
if (unlikely(remain > N_RELOC(ULONG_MAX)))
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return -EINVAL;
|
2016-10-18 12:02:51 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/*
|
|
|
|
* We must check that the entire relocation array is safe
|
|
|
|
* to read. However, if the array is not writable the user loses
|
|
|
|
* the updated relocation values.
|
|
|
|
*/
|
2020-04-07 08:59:30 +00:00
|
|
|
if (unlikely(!access_ok(urelocs, remain * sizeof(*urelocs))))
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
do {
|
|
|
|
struct drm_i915_gem_relocation_entry *r = stack;
|
|
|
|
unsigned int count =
|
2020-04-07 08:59:30 +00:00
|
|
|
min_t(unsigned long, remain, ARRAY_SIZE(stack));
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
unsigned int copied;
|
2012-03-24 20:12:53 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/*
|
|
|
|
* This is the fast path and we cannot handle a pagefault
|
2016-10-18 12:02:51 +00:00
|
|
|
* whilst holding the struct mutex lest the user pass in the
|
|
|
|
* relocations contained within a mmaped bo. For in such a case
|
|
|
|
* we, the page fault handler would call i915_gem_fault() and
|
|
|
|
* we would try to acquire the struct mutex again. Obviously
|
|
|
|
* this is bad and so lockdep complains vehemently.
|
|
|
|
*/
|
2020-08-19 14:08:43 +00:00
|
|
|
pagefault_disable();
|
|
|
|
copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
|
|
|
|
pagefault_enable();
|
2020-09-08 05:41:17 +00:00
|
|
|
if (unlikely(copied)) {
|
|
|
|
remain = -EFAULT;
|
|
|
|
goto out;
|
|
|
|
}
|
2010-11-25 18:00:26 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
remain -= count;
|
2012-03-24 20:12:53 +00:00
|
|
|
do {
|
2020-03-03 20:43:44 +00:00
|
|
|
u64 offset = eb_relocate_entry(eb, ev, r);
|
2010-11-25 18:00:26 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (likely(offset == 0)) {
|
|
|
|
} else if ((s64)offset < 0) {
|
2020-09-08 05:41:17 +00:00
|
|
|
remain = (int)offset;
|
|
|
|
goto out;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Note that reporting an error now
|
|
|
|
* leaves everything in an inconsistent
|
|
|
|
* state as we have *already* changed
|
|
|
|
* the relocation value inside the
|
|
|
|
* object. As we have not changed the
|
|
|
|
* reloc.presumed_offset or will not
|
|
|
|
* change the execobject.offset, on the
|
|
|
|
* call we may not rewrite the value
|
|
|
|
* inside the object, leaving it
|
|
|
|
* dangling and causing a GPU hang. Unless
|
|
|
|
* userspace dynamically rebuilds the
|
|
|
|
* relocations on each execbuf rather than
|
|
|
|
* presume a static tree.
|
|
|
|
*
|
|
|
|
* We did previously check if the relocations
|
|
|
|
* were writable (access_ok), an error now
|
|
|
|
* would be a strange race with mprotect,
|
|
|
|
* having already demonstrated that we
|
|
|
|
* can read from this userspace address.
|
|
|
|
*/
|
|
|
|
offset = gen8_canonical_addr(offset & ~UPDATE);
|
2020-03-31 16:21:50 +00:00
|
|
|
__put_user(offset,
|
|
|
|
&urelocs[r - stack].presumed_offset);
|
2012-03-24 20:12:53 +00:00
|
|
|
}
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
} while (r++, --count);
|
|
|
|
urelocs += ARRAY_SIZE(stack);
|
|
|
|
} while (remain);
|
2020-09-08 05:41:17 +00:00
|
|
|
out:
|
2020-08-19 14:08:48 +00:00
|
|
|
reloc_cache_reset(&eb->reloc_cache, eb);
|
2020-09-08 05:41:17 +00:00
|
|
|
return remain;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
static int
|
|
|
|
eb_relocate_vma_slow(struct i915_execbuffer *eb, struct eb_vma *ev)
|
2010-11-25 18:00:26 +00:00
|
|
|
{
|
2020-08-19 14:08:43 +00:00
|
|
|
const struct drm_i915_gem_exec_object2 *entry = ev->exec;
|
|
|
|
struct drm_i915_gem_relocation_entry *relocs =
|
|
|
|
u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
|
|
|
|
unsigned int i;
|
2020-03-03 20:43:45 +00:00
|
|
|
int err;
|
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
for (i = 0; i < entry->relocation_count; i++) {
|
|
|
|
u64 offset = eb_relocate_entry(eb, ev, &relocs[i]);
|
2020-03-03 20:43:45 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
if ((s64)offset < 0) {
|
|
|
|
err = (int)offset;
|
|
|
|
goto err;
|
|
|
|
}
|
2020-03-06 07:16:14 +00:00
|
|
|
}
|
2020-08-19 14:08:43 +00:00
|
|
|
err = 0;
|
|
|
|
err:
|
2020-08-19 14:08:48 +00:00
|
|
|
reloc_cache_reset(&eb->reloc_cache, eb);
|
2020-08-19 14:08:43 +00:00
|
|
|
return err;
|
|
|
|
}
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
|
|
|
|
{
|
|
|
|
const char __user *addr, *end;
|
|
|
|
unsigned long size;
|
|
|
|
char __maybe_unused c;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
size = entry->relocation_count;
|
|
|
|
if (size == 0)
|
|
|
|
return 0;
|
2020-05-01 19:29:44 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
if (size > N_RELOC(ULONG_MAX))
|
|
|
|
return -EINVAL;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
addr = u64_to_user_ptr(entry->relocs_ptr);
|
|
|
|
size *= sizeof(struct drm_i915_gem_relocation_entry);
|
|
|
|
if (!access_ok(addr, size))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
end = addr + size;
|
|
|
|
for (; addr < end; addr += PAGE_SIZE) {
|
|
|
|
int err = __get_user(c, addr);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
return __get_user(c, end - 1);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
static int eb_copy_relocations(const struct i915_execbuffer *eb)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
{
|
2020-08-19 14:08:43 +00:00
|
|
|
struct drm_i915_gem_relocation_entry *relocs;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
const unsigned int count = eb->buffer_count;
|
|
|
|
unsigned int i;
|
2020-08-19 14:08:43 +00:00
|
|
|
int err;
|
2010-11-25 18:00:26 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
for (i = 0; i < count; i++) {
|
2020-08-19 14:08:43 +00:00
|
|
|
const unsigned int nreloc = eb->exec[i].relocation_count;
|
|
|
|
struct drm_i915_gem_relocation_entry __user *urelocs;
|
|
|
|
unsigned long size;
|
|
|
|
unsigned long copied;
|
2019-05-28 09:29:51 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
if (nreloc == 0)
|
|
|
|
continue;
|
2019-05-28 09:29:51 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
err = check_relocations(&eb->exec[i]);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
2019-05-28 09:29:51 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
|
|
|
|
size = nreloc * sizeof(*relocs);
|
2019-05-28 09:29:51 +00:00
|
|
|
|
2020-08-19 14:08:43 +00:00
|
|
|
relocs = kvmalloc_array(size, 1, GFP_KERNEL);
|
|
|
|
if (!relocs) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err;
|
2019-05-28 09:29:51 +00:00
|
|
|
}
|
2020-08-19 14:08:43 +00:00
|
|
|
|
|
|
|
/* copy_from_user is limited to < 4GiB */
|
|
|
|
copied = 0;
|
|
|
|
do {
|
|
|
|
unsigned int len =
|
|
|
|
min_t(u64, BIT_ULL(31), size - copied);
|
|
|
|
|
|
|
|
if (__copy_from_user((char *)relocs + copied,
|
|
|
|
(char __user *)urelocs + copied,
|
|
|
|
len))
|
|
|
|
goto end;
|
|
|
|
|
|
|
|
copied += len;
|
|
|
|
} while (copied < size);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* As we do not update the known relocation offsets after
|
|
|
|
* relocating (due to the complexities in lock handling),
|
|
|
|
* we need to mark them as invalid now so that we force the
|
|
|
|
* relocation processing next time. Just in case the target
|
|
|
|
* object is evicted and then rebound into its old
|
|
|
|
* presumed_offset before the next execbuffer - if that
|
|
|
|
* happened we would make the mistake of assuming that the
|
|
|
|
* relocations were valid.
|
|
|
|
*/
|
|
|
|
if (!user_access_begin(urelocs, size))
|
|
|
|
goto end;
|
|
|
|
|
|
|
|
for (copied = 0; copied < nreloc; copied++)
|
|
|
|
unsafe_put_user(-1,
|
|
|
|
&urelocs[copied].presumed_offset,
|
|
|
|
end_user);
|
|
|
|
user_access_end();
|
|
|
|
|
|
|
|
eb->exec[i].relocs_ptr = (uintptr_t)relocs;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
end_user:
|
|
|
|
user_access_end();
|
|
|
|
end:
|
|
|
|
kvfree(relocs);
|
|
|
|
err = -EFAULT;
|
|
|
|
err:
|
|
|
|
while (i--) {
|
|
|
|
relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
|
|
|
|
if (eb->exec[i].relocation_count)
|
|
|
|
kvfree(relocs);
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int eb_prefault_relocations(const struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
const unsigned int count = eb->buffer_count;
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < count; i++) {
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = check_relocations(&eb->exec[i]);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
static int eb_reinit_userptr(struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
const unsigned int count = eb->buffer_count;
|
|
|
|
unsigned int i;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (likely(!(eb->args->flags & __EXEC_USERPTR_USED)))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
for (i = 0; i < count; i++) {
|
|
|
|
struct eb_vma *ev = &eb->vma[i];
|
|
|
|
|
|
|
|
if (!i915_gem_object_is_userptr(ev->vma->obj))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
ret = i915_gem_object_userptr_submit_init(ev->vma->obj);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
ev->flags |= __EXEC_OBJECT_USERPTR_INIT;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
/*
 * Slow path for relocation processing and command parsing: drop all locks,
 * fix up whatever stopped the fast path (prefault, copy-from-user, or
 * waiting), then reacquire everything and retry. On success the request is
 * ready for eb_parse()'d submission; on failure the error is returned and
 * any local relocation copies are freed.
 */
static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb)
{
	bool have_copy = false;
	struct eb_vma *ev;
	int err = 0;

repeat:
	/* Each pass drops the locks, so allow the user to interrupt us. */
	if (signal_pending(current)) {
		err = -ERESTARTSYS;
		goto out;
	}

	/* We may process another execbuffer during the unlock... */
	eb_release_vmas(eb, false);
	i915_gem_ww_ctx_fini(&eb->ww);

	/*
	 * We take 3 passes through the slowpatch.
	 *
	 * 1 - we try to just prefault all the user relocation entries and
	 * then attempt to reuse the atomic pagefault disabled fast path again.
	 *
	 * 2 - we copy the user entries to a local buffer here outside of the
	 * local and allow ourselves to wait upon any rendering before
	 * relocations
	 *
	 * 3 - we already have a local copy of the relocation entries, but
	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
	 */
	if (!err) {
		err = eb_prefault_relocations(eb);
	} else if (!have_copy) {
		err = eb_copy_relocations(eb);
		have_copy = err == 0;
	} else {
		cond_resched();
		err = 0;
	}

	/* Userptr pages must be re-pinned while no locks are held. */
	if (!err)
		err = eb_reinit_userptr(eb);

	i915_gem_ww_ctx_init(&eb->ww, true);
	if (err)
		goto out;

	/* reacquire the objects */
repeat_validate:
	err = eb_pin_engine(eb, false);
	if (err)
		goto err;

	err = eb_validate_vmas(eb);
	if (err)
		goto err;

	GEM_BUG_ON(!eb->batches[0]);

	list_for_each_entry(ev, &eb->relocs, reloc_link) {
		if (!have_copy) {
			err = eb_relocate_vma(eb, ev);
			if (err)
				break;
		} else {
			err = eb_relocate_vma_slow(eb, ev);
			if (err)
				break;
		}
	}

	if (err == -EDEADLK)
		goto err;

	/* Without a local copy yet, restart from the top to make one. */
	if (err && !have_copy)
		goto repeat;

	if (err)
		goto err;

	/* as last step, parse the command buffer */
	err = eb_parse(eb);
	if (err)
		goto err;

	/*
	 * Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	/* ww-mutex deadlock: back off and revalidate under the same ctx. */
	if (err == -EDEADLK) {
		eb_release_vmas(eb, false);
		err = i915_gem_ww_ctx_backoff(&eb->ww);
		if (!err)
			goto repeat_validate;
	}

	if (err == -EAGAIN)
		goto repeat;

out:
	/* Free the kernel-side relocation copies made in pass 2. */
	if (have_copy) {
		const unsigned int count = eb->buffer_count;
		unsigned int i;

		for (i = 0; i < count; i++) {
			const struct drm_i915_gem_exec_object2 *entry =
				&eb->exec[i];
			struct drm_i915_gem_relocation_entry *relocs;

			if (!entry->relocation_count)
				continue;

			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
			kvfree(relocs);
		}
	}

	return err;
}
/*
 * Fast path: pin the engine, validate all vmas, apply relocations in-place
 * and parse the command buffer. Falls back to eb_relocate_parse_slow() when
 * faulting/waiting is required (-EAGAIN), and retries internally on ww-mutex
 * deadlock (-EDEADLK) after backing off.
 */
static int eb_relocate_parse(struct i915_execbuffer *eb)
{
	int err;
	bool throttle = true;

retry:
	err = eb_pin_engine(eb, throttle);
	if (err) {
		if (err != -EDEADLK)
			return err;

		goto err;
	}

	/* only throttle once, even if we didn't need to throttle */
	throttle = false;

	err = eb_validate_vmas(eb);
	if (err == -EAGAIN)
		goto slow;
	else if (err)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (eb->args->flags & __EXEC_HAS_RELOC) {
		struct eb_vma *ev;

		list_for_each_entry(ev, &eb->relocs, reloc_link) {
			err = eb_relocate_vma(eb, ev);
			if (err)
				break;
		}

		/* -EDEADLK is handled locally; anything else needs the slow path. */
		if (err == -EDEADLK)
			goto err;
		else if (err)
			goto slow;
	}

	if (!err)
		err = eb_parse(eb);

err:
	if (err == -EDEADLK) {
		eb_release_vmas(eb, false);
		err = i915_gem_ww_ctx_backoff(&eb->ww);
		if (!err)
			goto retry;
	}

	return err;

slow:
	err = eb_relocate_parse_slow(eb);
	if (err)
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		eb->args->flags &= ~__EXEC_HAS_RELOC;

	return err;
}
/*
 * Using two helper loops for the order of which requests / batches are created
 * and added the to backend. Requests are created in order from the parent to
 * the last child. Requests are added in the reverse order, from the last child
 * to parent. This is done for locking reasons as the timeline lock is acquired
 * during request creation and released when the request is added to the
 * backend. To make lockdep happy (see intel_context_timeline_lock) this must be
 * the ordering.
 */
#define for_each_batch_create_order(_eb, _i) \
	for ((_i) = 0; (_i) < (_eb)->num_batches; ++(_i))
/*
 * The typecheck is folded into the initialiser via BUILD_BUG_ON_ZERO() so
 * the macro expands to a single statement. The previous form,
 * "BUILD_BUG_ON(...); for (...)", was two statements and would mis-parse
 * when used as the body of an unbraced if/else.
 */
#define for_each_batch_add_order(_eb, _i) \
	for ((_i) = (_eb)->num_batches - 1 + \
		    BUILD_BUG_ON_ZERO(!typecheck(int, _i)); \
	     (_i) >= 0; --(_i))
|
|
|
|
static struct i915_request *
|
|
|
|
eb_find_first_request_added(struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for_each_batch_add_order(eb, i)
|
|
|
|
if (eb->requests[i])
|
|
|
|
return eb->requests[i];
|
|
|
|
|
|
|
|
GEM_BUG_ON("Request not found");
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2021-11-29 20:22:45 +00:00
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
|
|
|
|
|
|
|
|
/* Stage with GFP_KERNEL allocations before we enter the signaling critical path */
|
2022-06-29 17:43:45 +00:00
|
|
|
static int eb_capture_stage(struct i915_execbuffer *eb)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
{
|
|
|
|
const unsigned int count = eb->buffer_count;
|
2021-11-29 20:22:45 +00:00
|
|
|
unsigned int i = count, j;
|
2019-05-28 09:29:51 +00:00
|
|
|
|
|
|
|
while (i--) {
|
2020-03-03 20:43:44 +00:00
|
|
|
struct eb_vma *ev = &eb->vma[i];
|
|
|
|
struct i915_vma *vma = ev->vma;
|
|
|
|
unsigned int flags = ev->flags;
|
2015-04-27 12:41:18 +00:00
|
|
|
|
2021-11-29 20:22:45 +00:00
|
|
|
if (!(flags & EXEC_OBJECT_CAPTURE))
|
|
|
|
continue;
|
2019-05-28 09:29:51 +00:00
|
|
|
|
2022-06-29 17:43:45 +00:00
|
|
|
if (i915_gem_context_is_recoverable(eb->gem_context) &&
|
|
|
|
(IS_DGFX(eb->i915) || GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 0)))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2021-11-29 20:22:45 +00:00
|
|
|
for_each_batch_create_order(eb, j) {
|
2018-02-21 09:56:36 +00:00
|
|
|
struct i915_capture_list *capture;
|
2017-04-15 09:39:02 +00:00
|
|
|
|
2021-11-29 20:22:45 +00:00
|
|
|
capture = kmalloc(sizeof(*capture), GFP_KERNEL);
|
|
|
|
if (!capture)
|
|
|
|
continue;
|
2021-10-14 17:20:00 +00:00
|
|
|
|
2021-11-29 20:22:45 +00:00
|
|
|
capture->next = eb->capture_lists[j];
|
2022-01-10 17:22:19 +00:00
|
|
|
capture->vma_res = i915_vma_resource_get(vma->resource);
|
2021-11-29 20:22:45 +00:00
|
|
|
eb->capture_lists[j] = capture;
|
|
|
|
}
|
|
|
|
}
|
2022-06-29 17:43:45 +00:00
|
|
|
|
|
|
|
return 0;
|
2021-11-29 20:22:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Commit once we're in the critical path */
|
|
|
|
static void eb_capture_commit(struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
unsigned int j;
|
|
|
|
|
|
|
|
for_each_batch_create_order(eb, j) {
|
|
|
|
struct i915_request *rq = eb->requests[j];
|
|
|
|
|
|
|
|
if (!rq)
|
|
|
|
break;
|
|
|
|
|
|
|
|
rq->capture_list = eb->capture_lists[j];
|
|
|
|
eb->capture_lists[j] = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Release anything that didn't get committed due to errors.
|
|
|
|
* The capture_list will otherwise be freed at request retire.
|
|
|
|
*/
|
|
|
|
static void eb_capture_release(struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
unsigned int j;
|
|
|
|
|
|
|
|
for_each_batch_create_order(eb, j) {
|
|
|
|
if (eb->capture_lists[j]) {
|
|
|
|
i915_request_free_capture_list(eb->capture_lists[j]);
|
|
|
|
eb->capture_lists[j] = NULL;
|
2017-04-15 09:39:02 +00:00
|
|
|
}
|
2021-11-29 20:22:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
/* Reset every staged capture list pointer before (re)building them. */
static void eb_capture_list_clear(struct i915_execbuffer *eb)
{
	memset(eb->capture_lists, 0, sizeof(eb->capture_lists));
}
#else

/*
 * CONFIG_DRM_I915_CAPTURE_ERROR=n: error capture is compiled out, so the
 * staging helpers collapse to no-ops with identical signatures.
 */
static int eb_capture_stage(struct i915_execbuffer *eb)
{
	return 0;
}

static void eb_capture_commit(struct i915_execbuffer *eb)
{
}

static void eb_capture_release(struct i915_execbuffer *eb)
{
}

static void eb_capture_list_clear(struct i915_execbuffer *eb)
{
}

#endif
|
|
|
|
static int eb_move_to_gpu(struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
const unsigned int count = eb->buffer_count;
|
|
|
|
unsigned int i = count;
|
|
|
|
int err = 0, j;
|
|
|
|
|
|
|
|
while (i--) {
|
|
|
|
struct eb_vma *ev = &eb->vma[i];
|
|
|
|
struct i915_vma *vma = ev->vma;
|
|
|
|
unsigned int flags = ev->flags;
|
|
|
|
struct drm_i915_gem_object *obj = vma->obj;
|
|
|
|
|
|
|
|
assert_vma_held(vma);
|
2017-04-15 09:39:02 +00:00
|
|
|
|
2017-08-11 11:11:16 +00:00
|
|
|
/*
|
|
|
|
* If the GPU is not _reading_ through the CPU cache, we need
|
|
|
|
* to make sure that any writes (both previous GPU writes from
|
|
|
|
* before a change in snooping levels and normal CPU writes)
|
|
|
|
* caught in that cache are flushed to main memory.
|
|
|
|
*
|
|
|
|
* We want to say
|
|
|
|
* obj->cache_dirty &&
|
|
|
|
* !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
|
|
|
|
* but gcc's optimiser doesn't handle that as well and emits
|
|
|
|
* two jumps instead of one. Maybe one day...
|
2021-10-18 17:45:06 +00:00
|
|
|
*
|
|
|
|
* FIXME: There is also sync flushing in set_pages(), which
|
|
|
|
* serves a different purpose(some of the time at least).
|
|
|
|
*
|
|
|
|
* We should consider:
|
|
|
|
*
|
|
|
|
* 1. Rip out the async flush code.
|
|
|
|
*
|
|
|
|
* 2. Or make the sync flushing use the async clflush path
|
|
|
|
* using mandatory fences underneath. Currently the below
|
|
|
|
* async flush happens after we bind the object.
|
2017-08-11 11:11:16 +00:00
|
|
|
*/
|
|
|
|
if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
|
2017-07-21 14:50:37 +00:00
|
|
|
if (i915_gem_clflush_object(obj, 0))
|
2017-08-16 08:52:06 +00:00
|
|
|
flags &= ~EXEC_OBJECT_ASYNC;
|
2017-07-21 14:50:37 +00:00
|
|
|
}
|
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
/* We only need to await on the first request */
|
2019-05-28 09:29:51 +00:00
|
|
|
if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
|
|
|
|
err = i915_request_await_object
|
2021-10-14 17:20:00 +00:00
|
|
|
(eb_find_first_request_added(eb), obj,
|
|
|
|
flags & EXEC_OBJECT_WRITE);
|
2019-05-28 09:29:51 +00:00
|
|
|
}
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
for_each_batch_add_order(eb, j) {
|
|
|
|
if (err)
|
|
|
|
break;
|
|
|
|
if (!eb->requests[j])
|
|
|
|
continue;
|
|
|
|
|
|
|
|
err = _i915_vma_move_to_active(vma, eb->requests[j],
|
|
|
|
j ? NULL :
|
|
|
|
eb->composite_fence ?
|
|
|
|
eb->composite_fence :
|
|
|
|
&eb->requests[j]->fence,
|
|
|
|
flags | __EXEC_OBJECT_NO_RESERVE);
|
|
|
|
}
|
2011-03-06 13:51:29 +00:00
|
|
|
}
|
2020-03-30 13:37:10 +00:00
|
|
|
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
#ifdef CONFIG_MMU_NOTIFIER
|
|
|
|
if (!err && (eb->args->flags & __EXEC_USERPTR_USED)) {
|
2021-06-10 14:35:25 +00:00
|
|
|
read_lock(&eb->i915->mm.notifier_lock);
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* count is always at least 1, otherwise __EXEC_USERPTR_USED
|
|
|
|
* could not have been set
|
|
|
|
*/
|
|
|
|
for (i = 0; i < count; i++) {
|
|
|
|
struct eb_vma *ev = &eb->vma[i];
|
|
|
|
struct drm_i915_gem_object *obj = ev->vma->obj;
|
|
|
|
|
|
|
|
if (!i915_gem_object_is_userptr(obj))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
err = i915_gem_object_userptr_submit_done(obj);
|
|
|
|
if (err)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2021-06-10 14:35:25 +00:00
|
|
|
read_unlock(&eb->i915->mm.notifier_lock);
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2019-05-28 09:29:51 +00:00
|
|
|
if (unlikely(err))
|
|
|
|
goto err_skip;
|
|
|
|
|
2016-08-18 16:16:40 +00:00
|
|
|
/* Unconditionally flush any chipset caches (for streaming writes). */
|
2021-10-14 17:20:00 +00:00
|
|
|
intel_gt_chipset_flush(eb->gt);
|
2021-11-29 20:22:45 +00:00
|
|
|
eb_capture_commit(eb);
|
|
|
|
|
2017-11-20 10:20:01 +00:00
|
|
|
return 0;
|
2019-05-28 09:29:51 +00:00
|
|
|
|
|
|
|
err_skip:
|
2021-10-14 17:20:00 +00:00
|
|
|
for_each_batch_create_order(eb, j) {
|
|
|
|
if (!eb->requests[j])
|
|
|
|
break;
|
|
|
|
|
|
|
|
i915_request_set_error_once(eb->requests[j], err);
|
|
|
|
}
|
2019-05-28 09:29:51 +00:00
|
|
|
return err;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
2019-12-09 12:23:14 +00:00
|
|
|
static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
|
2010-11-25 18:00:26 +00:00
|
|
|
{
|
2017-06-15 08:14:33 +00:00
|
|
|
if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
|
2019-12-09 12:23:14 +00:00
|
|
|
return -EINVAL;
|
2013-01-17 21:23:36 +00:00
|
|
|
|
2015-10-06 10:39:55 +00:00
|
|
|
/* Kernel clipping was a DRI1 misfeature */
|
2020-08-04 08:59:53 +00:00
|
|
|
if (!(exec->flags & (I915_EXEC_FENCE_ARRAY |
|
|
|
|
I915_EXEC_USE_EXTENSIONS))) {
|
2017-08-15 14:57:33 +00:00
|
|
|
if (exec->num_cliprects || exec->cliprects_ptr)
|
2019-12-09 12:23:14 +00:00
|
|
|
return -EINVAL;
|
2017-08-15 14:57:33 +00:00
|
|
|
}
|
2015-10-06 10:39:55 +00:00
|
|
|
|
|
|
|
if (exec->DR4 == 0xffffffff) {
|
|
|
|
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
|
|
|
|
exec->DR4 = 0;
|
|
|
|
}
|
|
|
|
if (exec->DR1 || exec->DR4)
|
2019-12-09 12:23:14 +00:00
|
|
|
return -EINVAL;
|
2015-10-06 10:39:55 +00:00
|
|
|
|
|
|
|
if ((exec->batch_start_offset | exec->batch_len) & 0x7)
|
2019-12-09 12:23:14 +00:00
|
|
|
return -EINVAL;
|
2015-10-06 10:39:55 +00:00
|
|
|
|
2019-12-09 12:23:14 +00:00
|
|
|
return 0;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
2018-02-21 09:56:36 +00:00
|
|
|
static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
|
2012-01-03 17:23:29 +00:00
|
|
|
{
|
2017-02-14 11:32:42 +00:00
|
|
|
u32 *cs;
|
|
|
|
int i;
|
2012-01-03 17:23:29 +00:00
|
|
|
|
2021-06-05 15:53:54 +00:00
|
|
|
if (GRAPHICS_VER(rq->engine->i915) != 7 || rq->engine->id != RCS0) {
|
2020-06-02 22:09:53 +00:00
|
|
|
drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
|
2014-04-24 06:09:09 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
2012-01-03 17:23:29 +00:00
|
|
|
|
2018-02-21 09:56:36 +00:00
|
|
|
cs = intel_ring_begin(rq, 4 * 2 + 2);
|
2017-02-14 11:32:42 +00:00
|
|
|
if (IS_ERR(cs))
|
|
|
|
return PTR_ERR(cs);
|
2012-01-03 17:23:29 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
*cs++ = MI_LOAD_REGISTER_IMM(4);
|
2012-01-03 17:23:29 +00:00
|
|
|
for (i = 0; i < 4; i++) {
|
2017-02-14 11:32:42 +00:00
|
|
|
*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
|
|
|
|
*cs++ = 0;
|
2012-01-03 17:23:29 +00:00
|
|
|
}
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
*cs++ = MI_NOOP;
|
2018-02-21 09:56:36 +00:00
|
|
|
intel_ring_advance(rq, cs);
|
2012-01-03 17:23:29 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-22 20:59:06 +00:00
|
|
|
static struct i915_vma *
|
2020-08-19 14:08:54 +00:00
|
|
|
shadow_batch_pin(struct i915_execbuffer *eb,
|
|
|
|
struct drm_i915_gem_object *obj,
|
2019-12-11 23:08:56 +00:00
|
|
|
struct i915_address_space *vm,
|
|
|
|
unsigned int flags)
|
2018-05-22 20:59:06 +00:00
|
|
|
{
|
2019-11-15 17:08:35 +00:00
|
|
|
struct i915_vma *vma;
|
|
|
|
int err;
|
2018-05-22 20:59:06 +00:00
|
|
|
|
2019-11-15 17:08:35 +00:00
|
|
|
vma = i915_vma_instance(obj, vm, NULL);
|
|
|
|
if (IS_ERR(vma))
|
|
|
|
return vma;
|
|
|
|
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, flags | PIN_VALIDATE);
|
2019-11-15 17:08:35 +00:00
|
|
|
if (err)
|
|
|
|
return ERR_PTR(err);
|
|
|
|
|
|
|
|
return vma;
|
2018-05-22 20:59:06 +00:00
|
|
|
}
|
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
|
|
|
|
* batch" bit. Hence we need to pin secure batches into the global gtt.
|
|
|
|
* hsw should have this fixed, but bdw mucks it up again. */
|
|
|
|
if (eb->batch_flags & I915_DISPATCH_SECURE)
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
return i915_gem_object_ggtt_pin_ww(vma->obj, &eb->ww, NULL, 0, 0, PIN_VALIDATE);
|
2020-08-19 14:08:54 +00:00
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-12-11 11:04:36 +00:00
|
|
|
static int eb_parse(struct i915_execbuffer *eb)
|
2014-12-11 20:13:12 +00:00
|
|
|
{
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
struct drm_i915_private *i915 = eb->i915;
|
2020-08-19 14:08:48 +00:00
|
|
|
struct intel_gt_buffer_pool_node *pool = eb->batch_pool;
|
2020-08-19 14:08:54 +00:00
|
|
|
struct i915_vma *shadow, *trampoline, *batch;
|
2020-10-15 11:59:54 +00:00
|
|
|
unsigned long len;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
int err;
|
2014-12-11 20:13:12 +00:00
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
if (!eb_use_cmdparser(eb)) {
|
2021-10-14 17:20:00 +00:00
|
|
|
batch = eb_dispatch_secure(eb, eb->batches[0]->vma);
|
2020-08-19 14:08:54 +00:00
|
|
|
if (IS_ERR(batch))
|
|
|
|
return PTR_ERR(batch);
|
|
|
|
|
|
|
|
goto secure_batch;
|
|
|
|
}
|
2019-12-11 11:04:36 +00:00
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
if (intel_context_is_parallel(eb->context))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
len = eb->batch_len[0];
|
2019-12-11 23:08:56 +00:00
|
|
|
if (!CMDPARSER_USES_GGTT(eb->i915)) {
|
|
|
|
/*
|
|
|
|
* ppGTT backed shadow buffers must be mapped RO, to prevent
|
|
|
|
* post-scan tampering
|
|
|
|
*/
|
|
|
|
if (!eb->context->vm->has_read_only) {
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
drm_dbg(&i915->drm,
|
|
|
|
"Cannot prevent post-scan tampering without RO capable vm\n");
|
2019-12-11 23:08:56 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
|
|
|
|
}
|
2021-10-14 17:20:00 +00:00
|
|
|
if (unlikely(len < eb->batch_len[0])) /* last paranoid check of overflow */
|
2020-10-15 11:59:54 +00:00
|
|
|
return -EINVAL;
|
2019-12-11 23:08:56 +00:00
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
if (!pool) {
|
2021-10-14 17:20:00 +00:00
|
|
|
pool = intel_gt_get_buffer_pool(eb->gt, len,
|
2021-01-19 13:31:06 +00:00
|
|
|
I915_MAP_WB);
|
2020-08-19 14:08:48 +00:00
|
|
|
if (IS_ERR(pool))
|
|
|
|
return PTR_ERR(pool);
|
|
|
|
eb->batch_pool = pool;
|
|
|
|
}
|
2014-12-11 20:13:12 +00:00
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
err = i915_gem_object_lock(pool->obj, &eb->ww);
|
|
|
|
if (err)
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
return err;
|
2014-12-11 20:13:12 +00:00
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
shadow = shadow_batch_pin(eb, pool->obj, eb->context->vm, PIN_USER);
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
if (IS_ERR(shadow))
|
|
|
|
return PTR_ERR(shadow);
|
|
|
|
|
2021-03-23 15:50:18 +00:00
|
|
|
intel_gt_buffer_pool_mark_used(pool);
|
2019-12-11 23:08:56 +00:00
|
|
|
i915_gem_object_set_readonly(shadow->obj);
|
2020-06-04 10:37:30 +00:00
|
|
|
shadow->private = pool;
|
2019-12-11 23:08:56 +00:00
|
|
|
|
|
|
|
trampoline = NULL;
|
|
|
|
if (CMDPARSER_USES_GGTT(eb->i915)) {
|
|
|
|
trampoline = shadow;
|
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
shadow = shadow_batch_pin(eb, pool->obj,
|
2021-10-14 17:20:00 +00:00
|
|
|
&eb->gt->ggtt->vm,
|
2019-12-11 23:08:56 +00:00
|
|
|
PIN_GLOBAL);
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
if (IS_ERR(shadow))
|
|
|
|
return PTR_ERR(shadow);
|
|
|
|
|
2020-06-04 10:37:30 +00:00
|
|
|
shadow->private = pool;
|
2019-12-11 23:08:56 +00:00
|
|
|
|
|
|
|
eb->batch_flags |= I915_DISPATCH_SECURE;
|
|
|
|
}
|
drm/i915/cmdparser: Add support for backward jumps
To keep things manageable, the pre-gen9 cmdparser does not
attempt to track any form of nested BB_START's. This did not
prevent usermode from using nested starts, or even chained
batches because the cmdparser is not strictly enforced pre gen9.
Instead, the existence of a nested BB_START would cause the batch
to be emitted in insecure mode, and any privileged capabilities
would not be available.
For Gen9, the cmdparser becomes mandatory (for BCS at least), and
so not providing any form of nested BB_START support becomes
overly restrictive. Any such batch will simply not run.
We make heavy use of backward jumps in igt, and it is much easier
to add support for this restricted subset of nested jumps, than to
rewrite the whole of our test suite to avoid them.
Add the required logic to support limited backward jumps, to
instructions that have already been validated by the parser.
Note that it's not sufficient to simply approve any BB_START
that jumps backwards in the buffer because this would allow an
attacker to embed a rogue instruction sequence within the
operand words of a harmless instruction (say LRI) and jump to
that.
We introduce a bit array to track every instr offset successfully
validated, and test the target of BB_START against this. If the
target offset hits, it is re-written to the same offset in the
shadow buffer and the BB_START cmd is allowed.
Note: This patch deliberately ignores checkpatch issues in the
cmdtables, in order to match the style of the surrounding code.
We'll correct the entire file in one go in a later patch.
v2: set dispatch secure late (Mika)
v3: rebase (Mika)
v4: Clear whitelist on each parse
Minor review updates (Chris)
v5: Correct backward jump batching
v6: fix compilation error due to struct eb shuffle (Mika)
Cc: Tony Luck <tony.luck@intel.com>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Takashi Iwai <tiwai@suse.de>
Cc: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
2018-09-20 16:58:36 +00:00
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
batch = eb_dispatch_secure(eb, shadow);
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
if (IS_ERR(batch))
|
|
|
|
return PTR_ERR(batch);
|
2020-08-19 14:08:54 +00:00
|
|
|
|
2021-11-16 14:20:45 +00:00
|
|
|
err = dma_resv_reserve_fences(shadow->obj->base.resv, 1);
|
2021-07-14 19:34:15 +00:00
|
|
|
if (err)
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
return err;
|
2021-07-14 19:34:15 +00:00
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
err = intel_engine_cmd_parser(eb->context->engine,
|
|
|
|
eb->batches[0]->vma,
|
2021-07-14 19:34:15 +00:00
|
|
|
eb->batch_start_offset,
|
2021-10-14 17:20:00 +00:00
|
|
|
eb->batch_len[0],
|
2021-07-14 19:34:15 +00:00
|
|
|
shadow, trampoline);
|
2019-12-11 23:08:56 +00:00
|
|
|
if (err)
|
drm/i915: Remove short-term pins from execbuf, v6.
Add a flag PIN_VALIDATE, to indicate we don't need to pin and only
protected by the object lock.
This removes the need to unpin, which is done by just releasing the
lock.
eb_reserve is slightly reworked for readability, but the same steps
are still done:
- First pass pins with NONBLOCK.
- Second pass unbinds all objects first, then pins.
- Third pass is only called when not all objects are softpinned, and
unbinds all objects, then calls i915_gem_evict_vm(), then pins.
Changes since v1:
- Split out eb_reserve() into separate functions for readability.
Changes since v2:
- Make batch buffer mappable on platforms where only GGTT is available,
to prevent moving the batch buffer during relocations.
Changes since v3:
- Preserve current behavior for batch buffer, instead be cautious when
calling i915_gem_object_ggtt_pin_ww, and re-use the current batch vma
if it's inside ggtt and map-and-fenceable.
- Remove impossible condition check from eb_reserve. (Matt)
Changes since v5:
- Do not even temporarily pin, just call i915_gem_evict_vm() and mark
all vma's as unpinned.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-7-maarten.lankhorst@linux.intel.com
2022-01-14 13:23:20 +00:00
|
|
|
return err;
|
2014-12-11 20:13:12 +00:00
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
eb->batches[0] = &eb->vma[eb->buffer_count++];
|
|
|
|
eb->batches[0]->vma = i915_vma_get(shadow);
|
|
|
|
eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN;
|
2014-12-11 20:13:12 +00:00
|
|
|
|
2019-12-11 23:08:56 +00:00
|
|
|
eb->trampoline = trampoline;
|
2018-05-22 20:59:06 +00:00
|
|
|
eb->batch_start_offset = 0;
|
|
|
|
|
2020-08-19 14:08:54 +00:00
|
|
|
secure_batch:
|
|
|
|
if (batch) {
|
2021-10-14 17:20:00 +00:00
|
|
|
if (intel_context_is_parallel(eb->context))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
eb->batches[0] = &eb->vma[eb->buffer_count++];
|
|
|
|
eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN;
|
|
|
|
eb->batches[0]->vma = i915_vma_get(batch);
|
2020-08-19 14:08:54 +00:00
|
|
|
}
|
2019-12-11 11:04:36 +00:00
|
|
|
return 0;
|
2014-12-11 20:13:12 +00:00
|
|
|
}
|
2014-09-06 09:28:27 +00:00
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
/*
 * eb_request_submit - emit the batch-buffer start for one request
 * @eb: execbuffer state
 * @rq: the request being constructed
 * @batch: vma holding the batch (user batch or parser shadow) to execute
 * @batch_len: length in bytes of the batch
 *
 * Applies the per-submission fixups visible here (gen7 SOL reset when
 * requested via the execbuf flags), emits the engine's initial breadcrumb
 * if it provides one, then the MI_BATCH_BUFFER_START for the batch at
 * eb->batch_start_offset, followed by the optional trampoline jump.
 *
 * Returns: 0 on success, negative errno from any emission step.
 */
static int eb_request_submit(struct i915_execbuffer *eb,
			     struct i915_request *rq,
			     struct i915_vma *batch,
			     u64 batch_len)
{
	int err;

	/* Propagate the context's no-preemption setting onto this request. */
	if (intel_context_nopreempt(rq->context))
		__set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);

	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
		err = i915_reset_gen7_sol_offsets(rq);
		if (err)
			return err;
	}

	/*
	 * After we completed waiting for other engines (using HW semaphores)
	 * then we can signal that this request/batch is ready to run. This
	 * allows us to determine if the batch is still waiting on the GPU
	 * or actually running by checking the breadcrumb.
	 */
	if (rq->context->engine->emit_init_breadcrumb) {
		err = rq->context->engine->emit_init_breadcrumb(rq);
		if (err)
			return err;
	}

	err = rq->context->engine->emit_bb_start(rq,
						 batch->node.start +
						 eb->batch_start_offset,
						 batch_len,
						 eb->batch_flags);
	if (err)
		return err;

	if (eb->trampoline) {
		/*
		 * The trampoline lives immediately after the batch
		 * (batch_start_offset must be 0) and is jumped to with
		 * no flags; parallel contexts never carry one.
		 */
		GEM_BUG_ON(intel_context_is_parallel(rq->context));
		GEM_BUG_ON(eb->batch_start_offset);
		err = rq->context->engine->emit_bb_start(rq,
							 eb->trampoline->node.start +
							 batch_len, 0, 0);
		if (err)
			return err;
	}

	return 0;
}
|
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
static int eb_submit(struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = eb_move_to_gpu(eb);
|
|
|
|
|
|
|
|
for_each_batch_create_order(eb, i) {
|
|
|
|
if (!eb->requests[i])
|
|
|
|
break;
|
|
|
|
|
|
|
|
trace_i915_request_queue(eb->requests[i], eb->batch_flags);
|
|
|
|
if (!err)
|
|
|
|
err = eb_request_submit(eb, eb->requests[i],
|
|
|
|
eb->batches[i]->vma,
|
|
|
|
eb->batch_len[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2021-12-14 19:33:35 +00:00
|
|
|
/* Number of video decode (VCS/VDBOX) engines present on this device. */
static int num_vcs_engines(struct drm_i915_private *i915)
{
	return hweight_long(VDBOX_MASK(to_gt(i915)));
}
|
|
|
|
|
2018-02-08 11:39:17 +00:00
|
|
|
/*
|
2014-04-17 02:37:40 +00:00
|
|
|
* Find one BSD ring to dispatch the corresponding BSD command.
|
2016-07-27 08:07:27 +00:00
|
|
|
* The engine index is returned.
|
2014-04-17 02:37:40 +00:00
|
|
|
*/
|
2016-01-15 15:12:50 +00:00
|
|
|
static unsigned int
|
2016-07-27 08:07:27 +00:00
|
|
|
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
|
|
|
|
struct drm_file *file)
|
2014-04-17 02:37:40 +00:00
|
|
|
{
|
|
|
|
struct drm_i915_file_private *file_priv = file->driver_priv;
|
|
|
|
|
2016-01-15 15:12:50 +00:00
|
|
|
/* Check whether the file_priv has already selected one ring. */
|
2016-09-01 11:58:21 +00:00
|
|
|
if ((int)file_priv->bsd_engine < 0)
|
2019-08-09 09:10:10 +00:00
|
|
|
file_priv->bsd_engine =
|
2022-10-10 02:44:02 +00:00
|
|
|
get_random_u32_below(num_vcs_engines(dev_priv));
|
drm/i915: Prevent negative relocation deltas from wrapping
This is pure evil. Userspace, I'm looking at you SNA, repacks batch
buffers on the fly after generation as they are being passed to the
kernel for execution. These batches also contain self-referenced
relocations as a single buffer encompasses the state commands, kernels,
vertices and sampler. During generation the buffers are placed at known
offsets within the full batch, and then the relocation deltas (as passed
to the kernel) are tweaked as the batch is repacked into a smaller buffer.
This means that userspace is passing negative relocations deltas, which
subsequently wrap to large values if the batch is at a low address. The
GPU hangs when it then tries to use the large value as a base for its
address offsets, rather than wrapping back to the real value (as one
would hope). As the GPU uses positive offsets from the base, we can
treat the relocation address as the minimum address read by the GPU.
For the upper bound, we trust that userspace will not read beyond the
end of the buffer.
So, how do we fix negative relocations from wrapping? We can either
check that every relocation looks valid when we write it, and then
position each object such that we prevent the offset wraparound, or we
just special-case the self-referential behaviour of SNA and force all
batches to be above 256k. Daniel prefers the latter approach.
This fixes a GPU hang when it tries to use an address (relocation +
offset) greater than the GTT size. The issue would occur quite easily
with full-ppgtt as each fd gets its own VM space, so low offsets would
often be handed out. However, with the rearrangement of the low GTT due
to capturing the BIOS framebuffer, it is already affecting kernels 3.15
onwards. I think only IVB+ is susceptible to this bug, but the workaround
should only kick in rarely, so it seems sensible to always apply it.
v3: Use a bias for batch buffers to prevent small negative delta relocations
from wrapping.
v4 from Daniel:
- s/BIAS/BATCH_OFFSET_BIAS/
- Extract eb_vma_misplaced/i915_vma_misplaced since the conditions
were growing rather cumbersome.
- Add a comment to eb_get_batch explaining why we do this.
- Apply the batch offset bias everywhere but mention that we've only
observed it on gen7 gpus.
- Drop PIN_OFFSET_FIX for now, that slipped in from a feature patch.
v5: Add static to eb_get_batch, spotted by 0-day tester.
Testcase: igt/gem_bad_reloc
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=78533
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> (v3)
Cc: stable@vger.kernel.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2014-05-23 06:48:08 +00:00
|
|
|
|
2016-07-27 08:07:27 +00:00
|
|
|
return file_priv->bsd_engine;
|
drm/i915: Prevent negative relocation deltas from wrapping
This is pure evil. Userspace, I'm looking at you SNA, repacks batch
buffers on the fly after generation as they are being passed to the
kernel for execution. These batches also contain self-referenced
relocations as a single buffer encompasses the state commands, kernels,
vertices and sampler. During generation the buffers are placed at known
offsets within the full batch, and then the relocation deltas (as passed
to the kernel) are tweaked as the batch is repacked into a smaller buffer.
This means that userspace is passing negative relocations deltas, which
subsequently wrap to large values if the batch is at a low address. The
GPU hangs when it then tries to use the large value as a base for its
address offsets, rather than wrapping back to the real value (as one
would hope). As the GPU uses positive offsets from the base, we can
treat the relocation address as the minimum address read by the GPU.
For the upper bound, we trust that userspace will not read beyond the
end of the buffer.
So, how do we fix negative relocations from wrapping? We can either
check that every relocation looks valid when we write it, and then
position each object such that we prevent the offset wraparound, or we
just special-case the self-referential behaviour of SNA and force all
batches to be above 256k. Daniel prefers the latter approach.
This fixes a GPU hang when it tries to use an address (relocation +
offset) greater than the GTT size. The issue would occur quite easily
with full-ppgtt as each fd gets its own VM space, so low offsets would
often be handed out. However, with the rearrangement of the low GTT due
to capturing the BIOS framebuffer, it is already affecting kernels 3.15
onwards. I think only IVB+ is susceptible to this bug, but the workaround
should only kick in rarely, so it seems sensible to always apply it.
v3: Use a bias for batch buffers to prevent small negative delta relocations
from wrapping.
v4 from Daniel:
- s/BIAS/BATCH_OFFSET_BIAS/
- Extract eb_vma_misplaced/i915_vma_misplaced since the conditions
were growing rather cumbersome.
- Add a comment to eb_get_batch explaining why we do this.
- Apply the batch offset bias everywhere but mention that we've only
observed it on gen7 gpus.
- Drop PIN_OFFSET_FIX for now, that slipped in from a feature patch.
v5: Add static to eb_get_batch, spotted by 0-day tester.
Testcase: igt/gem_bad_reloc
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=78533
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> (v3)
Cc: stable@vger.kernel.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2014-05-23 06:48:08 +00:00
|
|
|
}
|
|
|
|
|
2019-04-26 16:33:34 +00:00
|
|
|
/* Translation of legacy I915_EXEC_* ring selectors to engine ids. */
static const enum intel_engine_id user_ring_map[] = {
	[I915_EXEC_DEFAULT]	= RCS0,
	[I915_EXEC_RENDER]	= RCS0,
	[I915_EXEC_BLT]		= BCS0,
	[I915_EXEC_BSD]		= VCS0,
	[I915_EXEC_VEBOX]	= VECS0
};
|
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
/*
 * eb_throttle - pick a request to wait upon when the ring is congested
 * @eb: execbuffer state
 * @ce: the context whose ring we may need to drain
 *
 * Returns a referenced request to wait on so that roughly half the ring
 * becomes available afterwards, or NULL if there is already enough space
 * (or no suitable request was found on the timeline).
 */
static struct i915_request *eb_throttle(struct i915_execbuffer *eb, struct intel_context *ce)
{
	struct intel_ring *ring = ce->ring;
	struct intel_timeline *tl = ce->timeline;
	struct i915_request *rq;

	/*
	 * Completely unscientific finger-in-the-air estimates for suitable
	 * maximum user request size (to avoid blocking) and then backoff.
	 */
	if (intel_ring_update_space(ring) >= PAGE_SIZE)
		return NULL;

	/*
	 * Find a request that after waiting upon, there will be at least half
	 * the ring available. The hysteresis allows us to compete for the
	 * shared ring and should mean that we sleep less often prior to
	 * claiming our resources, but not so long that the ring completely
	 * drains before we can submit our next request.
	 */
	list_for_each_entry(rq, &tl->requests, link) {
		/* The timeline may interleave requests from other rings. */
		if (rq->ring != ring)
			continue;

		if (__intel_ring_space(rq->postfix,
				       ring->emit, ring->size) > ring->size / 2)
			break;
	}
	/* Ran off the end of the list without finding a candidate. */
	if (&rq->link == &tl->requests)
		return NULL; /* weird, we will check again later for real */

	return i915_request_get(rq);
}
|
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
/*
 * eb_pin_timeline - enter the context's timeline, optionally throttling
 * @eb: execbuffer state
 * @ce: context whose timeline to enter
 * @throttle: whether to block (or fail with O_NONBLOCK) on a full ring
 *
 * Enters @ce under its timeline lock and, when throttling, waits outside
 * the lock for enough ring space. On a failed wait the intel_context_enter()
 * is unwound under the raw timeline mutex before returning.
 *
 * Returns: 0 on success, -EWOULDBLOCK for a non-blocking file with a full
 * ring, -EINTR if the wait was interrupted, or the timeline-lock error.
 */
static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce,
			   bool throttle)
{
	struct intel_timeline *tl;
	struct i915_request *rq = NULL;

	/*
	 * Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process, and require the engine to be kept awake between accesses.
	 * Upon dispatch, we acquire another prolonged wakeref that we hold
	 * until the timeline is idle, which in turn releases the wakeref
	 * taken on the engine, and the parent device.
	 */
	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl))
		return PTR_ERR(tl);

	intel_context_enter(ce);
	if (throttle)
		rq = eb_throttle(eb, ce);
	intel_context_timeline_unlock(tl);

	if (rq) {
		bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
		long timeout = nonblock ? 0 : MAX_SCHEDULE_TIMEOUT;

		/* Wait for ring space without holding the timeline lock. */
		if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
				      timeout) < 0) {
			i915_request_put(rq);

			/*
			 * Error path, cannot use intel_context_timeline_lock as
			 * that is user interruptable and this clean up step
			 * must be done.
			 */
			mutex_lock(&ce->timeline->mutex);
			intel_context_exit(ce);
			mutex_unlock(&ce->timeline->mutex);

			if (nonblock)
				return -EWOULDBLOCK;
			else
				return -EINTR;
		}
		i915_request_put(rq);
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * eb_pin_engine - pin the execbuf's context (and any children) for submission
 * @eb: execbuffer state
 * @throttle: passed through to eb_pin_timeline()
 *
 * Pins the context (and, for parallel submission, every child context)
 * via the ww acquire context, then enters each timeline. On failure the
 * already-entered child timelines (the first @i of them) are exited and
 * all pins are dropped. Success is recorded in __EXEC_ENGINE_PINNED so
 * eb_unpin_engine() knows there is something to undo.
 *
 * Returns: 0 on success, -EIO for a banned context, or a negative errno
 * from pinning/timeline entry.
 */
static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle)
{
	struct intel_context *ce = eb->context, *child;
	int err;
	int i = 0, j = 0;

	GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED);

	if (unlikely(intel_context_is_banned(ce)))
		return -EIO;

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	err = intel_context_pin_ww(ce, &eb->ww);
	if (err)
		return err;
	for_each_child(ce, child) {
		err = intel_context_pin_ww(child, &eb->ww);
		GEM_BUG_ON(err); /* perma-pinned should incr a counter */
	}

	/* i counts how many child timelines were successfully entered. */
	for_each_child(ce, child) {
		err = eb_pin_timeline(eb, child, throttle);
		if (err)
			goto unwind;
		++i;
	}
	err = eb_pin_timeline(eb, ce, throttle);
	if (err)
		goto unwind;

	eb->args->flags |= __EXEC_ENGINE_PINNED;
	return 0;

unwind:
	/* Exit only the first i children whose timelines we entered. */
	for_each_child(ce, child) {
		if (j++ < i) {
			mutex_lock(&child->timeline->mutex);
			intel_context_exit(child);
			mutex_unlock(&child->timeline->mutex);
		}
	}
	/* All contexts were pinned above, so unpin them all. */
	for_each_child(ce, child)
		intel_context_unpin(child);
	intel_context_unpin(ce);
	return err;
}
|
|
|
|
|
2019-08-15 20:57:09 +00:00
|
|
|
static void eb_unpin_engine(struct i915_execbuffer *eb)
|
2019-04-25 05:01:43 +00:00
|
|
|
{
|
2021-10-14 17:20:00 +00:00
|
|
|
struct intel_context *ce = eb->context, *child;
|
2019-08-04 12:48:25 +00:00
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
if (!(eb->args->flags & __EXEC_ENGINE_PINNED))
|
|
|
|
return;
|
|
|
|
|
|
|
|
eb->args->flags &= ~__EXEC_ENGINE_PINNED;
|
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
for_each_child(ce, child) {
|
|
|
|
mutex_lock(&child->timeline->mutex);
|
|
|
|
intel_context_exit(child);
|
|
|
|
mutex_unlock(&child->timeline->mutex);
|
|
|
|
|
|
|
|
intel_context_unpin(child);
|
|
|
|
}
|
|
|
|
|
|
|
|
mutex_lock(&ce->timeline->mutex);
|
2019-08-04 12:48:25 +00:00
|
|
|
intel_context_exit(ce);
|
2021-10-14 17:20:00 +00:00
|
|
|
mutex_unlock(&ce->timeline->mutex);
|
2019-08-04 12:48:25 +00:00
|
|
|
|
drm/i915: Pull i915_vma_pin under the vm->mutex
Replace the struct_mutex requirement for pinning the i915_vma with the
local vm->mutex instead. Note that the vm->mutex is tainted by the
shrinker (we require unbinding from inside fs-reclaim) and so we cannot
allocate while holding that mutex. Instead we have to preallocate
workers to do allocate and apply the PTE updates after we have we
reserved their slot in the drm_mm (using fences to order the PTE writes
with the GPU work and with later unbind).
In adding the asynchronous vma binding, one subtle requirement is to
avoid coupling the binding fence into the backing object->resv. That is
the asynchronous binding only applies to the vma timeline itself and not
to the pages as that is a more global timeline (the binding of one vma
does not need to be ordered with another vma, nor does the implicit GEM
fencing depend on a vma, only on writes to the backing store). Keeping
the vma binding distinct from the backing store timelines is verified by
a number of async gem_exec_fence and gem_exec_schedule tests. The way we
do this is quite simple, we keep the fence for the vma binding separate
and only wait on it as required, and never add it to the obj->resv
itself.
Another consequence in reducing the locking around the vma is the
destruction of the vma is no longer globally serialised by struct_mutex.
A natural solution would be to add a kref to i915_vma, but that requires
decoupling the reference cycles, possibly by introducing a new
i915_mm_pages object that is own by both obj->mm and vma->pages.
However, we have not taken that route due to the overshadowing lmem/ttm
discussions, and instead play a series of complicated games with
trylocks to (hopefully) ensure that only one destruction path is called!
v2: Add some commentary, and some helpers to reduce patch churn.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-4-chris@chris-wilson.co.uk
2019-10-04 13:39:58 +00:00
|
|
|
intel_context_unpin(ce);
|
2019-04-25 05:01:43 +00:00
|
|
|
}
|
2016-01-15 15:12:50 +00:00
|
|
|
|
2019-04-26 16:33:34 +00:00
|
|
|
static unsigned int
|
2020-08-19 14:08:51 +00:00
|
|
|
eb_select_legacy_ring(struct i915_execbuffer *eb)
|
2016-01-15 15:12:50 +00:00
|
|
|
{
|
2019-04-25 05:01:43 +00:00
|
|
|
struct drm_i915_private *i915 = eb->i915;
|
2020-08-19 14:08:51 +00:00
|
|
|
struct drm_i915_gem_execbuffer2 *args = eb->args;
|
2016-01-15 15:12:50 +00:00
|
|
|
unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
|
|
|
|
|
2019-04-26 16:33:34 +00:00
|
|
|
if (user_ring_id != I915_EXEC_BSD &&
|
|
|
|
(args->flags & I915_EXEC_BSD_MASK)) {
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
drm_dbg(&i915->drm,
|
|
|
|
"execbuf with non bsd ring but with invalid "
|
|
|
|
"bsd dispatch flags: %d\n", (int)(args->flags));
|
2019-04-26 16:33:34 +00:00
|
|
|
return -1;
|
2016-01-15 15:12:50 +00:00
|
|
|
}
|
|
|
|
|
2019-08-09 12:31:53 +00:00
|
|
|
if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
|
2016-01-15 15:12:50 +00:00
|
|
|
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
|
|
|
|
|
|
|
|
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
|
2020-08-19 14:08:51 +00:00
|
|
|
bsd_idx = gen8_dispatch_bsd_engine(i915, eb->file);
|
2016-01-15 15:12:50 +00:00
|
|
|
} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
|
|
|
|
bsd_idx <= I915_EXEC_BSD_RING2) {
|
2016-01-27 13:41:09 +00:00
|
|
|
bsd_idx >>= I915_EXEC_BSD_SHIFT;
|
2016-01-15 15:12:50 +00:00
|
|
|
bsd_idx--;
|
|
|
|
} else {
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
drm_dbg(&i915->drm,
|
|
|
|
"execbuf with unknown bsd ring: %u\n",
|
|
|
|
bsd_idx);
|
2019-04-26 16:33:34 +00:00
|
|
|
return -1;
|
2016-01-15 15:12:50 +00:00
|
|
|
}
|
|
|
|
|
2019-04-26 16:33:34 +00:00
|
|
|
return _VCS(bsd_idx);
|
2016-01-15 15:12:50 +00:00
|
|
|
}
|
|
|
|
|
2019-04-26 16:33:34 +00:00
|
|
|
if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
|
drm/i915/gem: initial conversion to new logging macros using coccinelle
First pass of conversion to the new struct drm_based device logging
macros in the drm/i915/gem directory. This conversion was achieved using
the following coccinelle script that transforms based on the existence
of a straightforward struct drm_i915_private device:
@rule1@
identifier fn, T;
@@
fn(struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@rule2@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
Checkpatch warnings were addressed manually.
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122125750.9737-2-wambui.karugax@gmail.com
2020-01-22 12:57:49 +00:00
|
|
|
drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n",
|
|
|
|
user_ring_id);
|
2019-04-26 16:33:34 +00:00
|
|
|
return -1;
|
2016-01-15 15:12:50 +00:00
|
|
|
}
|
|
|
|
|
2019-04-26 16:33:34 +00:00
|
|
|
return user_ring_map[user_ring_id];
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Resolve which intel_context (engine) this execbuf will run on, take the
 * references the submission path relies on (context + children, GT wakeref,
 * VM reference) and publish the result in eb->context / eb->gt / eb->num_batches.
 *
 * Returns 0 on success or a negative error code; on any failure every
 * reference taken here is dropped again before returning.
 */
static int
eb_select_engine(struct i915_execbuffer *eb)
{
	struct intel_context *ce, *child;
	unsigned int idx;
	int err;

	/*
	 * With a user-defined engine map the execbuf index selects directly
	 * into that map; otherwise fall back to the legacy ring selection.
	 */
	if (i915_gem_context_user_engines(eb->gem_context))
		idx = eb->args->flags & I915_EXEC_RING_MASK;
	else
		idx = eb_select_legacy_ring(eb);

	ce = i915_gem_context_get_engine(eb->gem_context, idx);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	if (intel_context_is_parallel(ce)) {
		/* A parallel submit needs one batch per child plus the parent. */
		if (eb->buffer_count < ce->parallel.number_children + 1) {
			intel_context_put(ce);
			return -EINVAL;
		}
		/* Per-batch offsets/lengths are not supported for parallel submits. */
		if (eb->batch_start_offset || eb->args->batch_len) {
			intel_context_put(ce);
			return -EINVAL;
		}
	}
	eb->num_batches = ce->parallel.number_children + 1;

	/* Pin the whole family: each child, then the GT wakeref via the parent. */
	for_each_child(ce, child)
		intel_context_get(child);
	intel_gt_pm_get(ce->engine->gt);

	/* Lazily allocate HW state for the parent and each child context. */
	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		err = intel_context_alloc_state(ce);
		if (err)
			goto err;
	}
	for_each_child(ce, child) {
		if (!test_bit(CONTEXT_ALLOC_BIT, &child->flags)) {
			err = intel_context_alloc_state(child);
			if (err)
				goto err;
		}
	}

	/*
	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	 */
	err = intel_gt_terminally_wedged(ce->engine->gt);
	if (err)
		goto err;

	/* Fail if the address space is already being torn down. */
	if (!i915_vm_tryget(ce->vm)) {
		err = -ENOENT;
		goto err;
	}

	eb->context = ce;
	eb->gt = ce->engine->gt;

	/*
	 * Make sure engine pool stays alive even if we call intel_context_put
	 * during ww handling. The pool is destroyed when last pm reference
	 * is dropped, which breaks our -EDEADLK handling.
	 */
	return err;

err:
	/* Unwind in reverse order of acquisition. */
	intel_gt_pm_put(ce->engine->gt);
	for_each_child(ce, child)
		intel_context_put(child);
	intel_context_put(ce);
	return err;
}
|
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
/*
 * Release everything acquired by eb_select_engine(): the VM reference,
 * the GT wakeref, then the context references (children before the
 * parent) — i.e. the reverse of the order they were taken.
 */
static void
eb_put_engine(struct i915_execbuffer *eb)
{
	struct intel_context *child;

	i915_vm_put(eb->context->vm);
	intel_gt_pm_put(eb->gt);
	for_each_child(eb->context, child)
		intel_context_put(child);
	intel_context_put(eb->context);
}
|
|
|
|
|
2017-08-15 14:57:33 +00:00
|
|
|
static void
|
2020-08-04 08:59:54 +00:00
|
|
|
__free_fence_array(struct eb_fence *fences, unsigned int n)
|
2017-08-15 14:57:33 +00:00
|
|
|
{
|
2020-08-04 08:59:54 +00:00
|
|
|
while (n--) {
|
2020-08-04 08:59:53 +00:00
|
|
|
drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2));
|
2020-08-04 08:59:54 +00:00
|
|
|
dma_fence_put(fences[n].dma_fence);
|
2021-05-05 11:38:12 +00:00
|
|
|
dma_fence_chain_free(fences[n].chain_fence);
|
2020-08-04 08:59:54 +00:00
|
|
|
}
|
2017-08-15 14:57:33 +00:00
|
|
|
kvfree(fences);
|
|
|
|
}
|
|
|
|
|
2020-08-04 08:59:53 +00:00
|
|
|
/*
 * Append the user-supplied timeline fences (handles + per-handle timeline
 * points) from the DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES extension to
 * eb->fences. Each accepted entry stores a syncobj reference, an optional
 * wait fence resolved at the requested point, and — for signalled timeline
 * points — a preallocated dma_fence_chain node for later signalling.
 *
 * Returns 0 on success or a negative error code; entries already appended
 * remain on eb->fences for the caller's cleanup path.
 */
static int
add_timeline_fence_array(struct i915_execbuffer *eb,
			 const struct drm_i915_gem_execbuffer_ext_timeline_fences *timeline_fences)
{
	struct drm_i915_gem_exec_fence __user *user_fences;
	u64 __user *user_values;
	struct eb_fence *f;
	u64 nfences;
	int err = 0;

	nfences = timeline_fences->fence_count;
	if (!nfences)
		return 0;

	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
	if (nfences > min_t(unsigned long,
			    ULONG_MAX / sizeof(*user_fences),
			    SIZE_MAX / sizeof(*f)) - eb->num_fences)
		return -EINVAL;

	user_fences = u64_to_user_ptr(timeline_fences->handles_ptr);
	if (!access_ok(user_fences, nfences * sizeof(*user_fences)))
		return -EFAULT;

	user_values = u64_to_user_ptr(timeline_fences->values_ptr);
	if (!access_ok(user_values, nfences * sizeof(*user_values)))
		return -EFAULT;

	/* Grow eb->fences in place to hold the additional entries. */
	f = krealloc(eb->fences,
		     (eb->num_fences + nfences) * sizeof(*f),
		     __GFP_NOWARN | GFP_KERNEL);
	if (!f)
		return -ENOMEM;

	eb->fences = f;
	f += eb->num_fences;

	/* The low pointer bits must be free to pack the user flags into. */
	BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
		     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);

	while (nfences--) {
		struct drm_i915_gem_exec_fence user_fence;
		struct drm_syncobj *syncobj;
		struct dma_fence *fence = NULL;
		u64 point;

		if (__copy_from_user(&user_fence,
				     user_fences++,
				     sizeof(user_fence)))
			return -EFAULT;

		if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS)
			return -EINVAL;

		if (__get_user(point, user_values++))
			return -EFAULT;

		syncobj = drm_syncobj_find(eb->file, user_fence.handle);
		if (!syncobj) {
			DRM_DEBUG("Invalid syncobj handle provided\n");
			return -ENOENT;
		}

		fence = drm_syncobj_fence_get(syncobj);

		/* A wait on an empty syncobj is an error unless we only signal. */
		if (!fence && user_fence.flags &&
		    !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
			DRM_DEBUG("Syncobj handle has no fence\n");
			drm_syncobj_put(syncobj);
			return -EINVAL;
		}

		/* Resolve the requested timeline point within the chain. */
		if (fence)
			err = dma_fence_chain_find_seqno(&fence, point);

		if (err && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
			DRM_DEBUG("Syncobj handle missing requested point %llu\n", point);
			dma_fence_put(fence);
			drm_syncobj_put(syncobj);
			return err;
		}

		/*
		 * A point might have been signaled already and
		 * garbage collected from the timeline. In this case
		 * just ignore the point and carry on.
		 */
		if (!fence && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
			drm_syncobj_put(syncobj);
			continue;
		}

		/*
		 * For timeline syncobjs we need to preallocate chains for
		 * later signaling.
		 */
		if (point != 0 && user_fence.flags & I915_EXEC_FENCE_SIGNAL) {
			/*
			 * Waiting and signaling the same point (when point !=
			 * 0) would break the timeline.
			 */
			if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
				DRM_DEBUG("Trying to wait & signal the same timeline point.\n");
				dma_fence_put(fence);
				drm_syncobj_put(syncobj);
				return -EINVAL;
			}

			f->chain_fence = dma_fence_chain_alloc();
			if (!f->chain_fence) {
				drm_syncobj_put(syncobj);
				dma_fence_put(fence);
				return -ENOMEM;
			}
		} else {
			f->chain_fence = NULL;
		}

		/* Commit the entry; ownership of syncobj/fence/chain moves to eb. */
		f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2);
		f->dma_fence = fence;
		f->value = point;
		f++;
		eb->num_fences++;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Append the legacy I915_EXEC_FENCE_ARRAY syncobjs (passed via
 * cliprects_ptr/num_cliprects) to eb->fences. Unlike the timeline
 * extension, these are binary syncobjs: no timeline point and no
 * preallocated chain node.
 *
 * Returns 0 on success or a negative error code; entries already appended
 * remain on eb->fences for the caller's cleanup path.
 */
static int add_fence_array(struct i915_execbuffer *eb)
{
	struct drm_i915_gem_execbuffer2 *args = eb->args;
	struct drm_i915_gem_exec_fence __user *user;
	unsigned long num_fences = args->num_cliprects;
	struct eb_fence *f;

	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
		return 0;

	if (!num_fences)
		return 0;

	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
	if (num_fences > min_t(unsigned long,
			       ULONG_MAX / sizeof(*user),
			       SIZE_MAX / sizeof(*f) - eb->num_fences))
		return -EINVAL;

	user = u64_to_user_ptr(args->cliprects_ptr);
	if (!access_ok(user, num_fences * sizeof(*user)))
		return -EFAULT;

	/* Grow eb->fences in place to hold the additional entries. */
	f = krealloc(eb->fences,
		     (eb->num_fences + num_fences) * sizeof(*f),
		     __GFP_NOWARN | GFP_KERNEL);
	if (!f)
		return -ENOMEM;

	eb->fences = f;
	f += eb->num_fences;
	while (num_fences--) {
		struct drm_i915_gem_exec_fence user_fence;
		struct drm_syncobj *syncobj;
		struct dma_fence *fence = NULL;

		if (__copy_from_user(&user_fence, user++, sizeof(user_fence)))
			return -EFAULT;

		if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS)
			return -EINVAL;

		syncobj = drm_syncobj_find(eb->file, user_fence.handle);
		if (!syncobj) {
			DRM_DEBUG("Invalid syncobj handle provided\n");
			return -ENOENT;
		}

		/* Only take the current fence when the user asked to wait. */
		if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
			fence = drm_syncobj_fence_get(syncobj);
			if (!fence) {
				DRM_DEBUG("Syncobj handle has no fence\n");
				drm_syncobj_put(syncobj);
				return -EINVAL;
			}
		}

		/* The low pointer bits must be free to pack the user flags into. */
		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);

		/* Commit the entry; ownership of syncobj/fence moves to eb. */
		f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2);
		f->dma_fence = fence;
		f->value = 0;
		f->chain_fence = NULL;
		f++;
		eb->num_fences++;
	}

	return 0;
}
|
2017-08-15 14:57:33 +00:00
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
/* Free an eb_fence array built by add_fence_array()/add_timeline_fence_array(). */
static void put_fence_array(struct eb_fence *fences, int num_fences)
{
	if (!fences)
		return;

	__free_fence_array(fences, num_fences);
}
|
|
|
|
|
|
|
|
static int
|
2021-10-14 17:20:00 +00:00
|
|
|
await_fence_array(struct i915_execbuffer *eb,
|
|
|
|
struct i915_request *rq)
|
2017-08-15 14:57:33 +00:00
|
|
|
{
|
|
|
|
unsigned int n;
|
|
|
|
int err;
|
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
for (n = 0; n < eb->num_fences; n++) {
|
2017-08-15 14:57:33 +00:00
|
|
|
struct drm_syncobj *syncobj;
|
|
|
|
unsigned int flags;
|
|
|
|
|
2020-08-04 08:59:53 +00:00
|
|
|
syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2);
|
2017-08-15 14:57:33 +00:00
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
if (!eb->fences[n].dma_fence)
|
|
|
|
continue;
|
2017-08-15 14:57:33 +00:00
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
err = i915_request_await_dma_fence(rq, eb->fences[n].dma_fence);
|
2017-08-15 14:57:33 +00:00
|
|
|
if (err < 0)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
/*
 * Propagate the execbuf's completion @fence to every syncobj the user
 * marked with I915_EXEC_FENCE_SIGNAL.
 */
static void signal_fence_array(const struct i915_execbuffer *eb,
			       struct dma_fence * const fence)
{
	unsigned int n;

	for (n = 0; n < eb->num_fences; n++) {
		struct drm_syncobj *syncobj;
		unsigned int flags;

		syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2);
		if (!(flags & I915_EXEC_FENCE_SIGNAL))
			continue;

		if (eb->fences[n].chain_fence) {
			/* Timeline syncobj: attach the fence at the requested point. */
			drm_syncobj_add_point(syncobj,
					      eb->fences[n].chain_fence,
					      fence,
					      eb->fences[n].value);
			/*
			 * The chain's ownership is transferred to the
			 * timeline.
			 */
			eb->fences[n].chain_fence = NULL;
		} else {
			/* Binary syncobj: replace whatever fence it currently holds. */
			drm_syncobj_replace_fence(syncobj, fence);
		}
	}
}
|
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
static int
|
|
|
|
parse_timeline_fences(struct i915_user_extension __user *ext, void *data)
|
|
|
|
{
|
|
|
|
struct i915_execbuffer *eb = data;
|
|
|
|
struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
|
|
|
|
|
|
|
|
if (copy_from_user(&timeline_fences, ext, sizeof(timeline_fences)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
return add_timeline_fence_array(eb, &timeline_fences);
|
|
|
|
}
|
|
|
|
|
2020-03-03 08:05:46 +00:00
|
|
|
/*
 * Opportunistically retire completed requests on @tl, stopping at @end
 * or at the first request that cannot be retired yet.
 */
static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (rq == end || !i915_request_retire(rq))
			break;
}
|
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
/*
 * Commit and queue a single request built by execbuf. Called with the
 * request's timeline mutex held (taken at request creation); this function
 * unpins and drops that mutex before returning.
 *
 * @err carries the accumulated submission status; it is returned unchanged
 * unless the context was closed underneath us, in which case -ENOENT
 * overrides it. For parallel contexts, a non-zero @err skips the request
 * and @last_parallel marks the request that triggers parallel submission.
 */
static int eb_request_add(struct i915_execbuffer *eb, struct i915_request *rq,
			  int err, bool last_parallel)
{
	struct intel_timeline * const tl = i915_request_timeline(rq);
	struct i915_sched_attr attr = {};
	struct i915_request *prev;

	lockdep_assert_held(&tl->mutex);
	lockdep_unpin_lock(&tl->mutex, rq->cookie);

	trace_i915_request_add(rq);

	prev = __i915_request_commit(rq);

	/* Check that the context wasn't destroyed before submission */
	if (likely(!intel_context_is_closed(eb->context))) {
		/* Inherit the scheduling attributes from the GEM context. */
		attr = eb->gem_context->sched;
	} else {
		/* Serialise with context_close via the add_to_timeline */
		i915_request_set_error_once(rq, -ENOENT);
		__i915_request_skip(rq);
		err = -ENOENT; /* override any transient errors */
	}

	if (intel_context_is_parallel(eb->context)) {
		if (err) {
			/* Flag the failure so sibling requests are skipped too. */
			__i915_request_skip(rq);
			set_bit(I915_FENCE_FLAG_SKIP_PARALLEL,
				&rq->fence.flags);
		}
		if (last_parallel)
			set_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL,
				&rq->fence.flags);
	}

	__i915_request_queue(rq, &attr);

	/* Try to clean up the client's timeline after submitting the request */
	if (prev)
		retire_requests(tl, prev);

	mutex_unlock(&tl->mutex);

	return err;
}
|
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
static int eb_requests_add(struct i915_execbuffer *eb, int err)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We iterate in reverse order of creation to release timeline mutexes in
|
|
|
|
* same order.
|
|
|
|
*/
|
|
|
|
for_each_batch_add_order(eb, i) {
|
|
|
|
struct i915_request *rq = eb->requests[i];
|
|
|
|
|
|
|
|
if (!rq)
|
|
|
|
continue;
|
|
|
|
err |= eb_request_add(eb, rq, err, i == 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2020-08-04 08:59:53 +00:00
|
|
|
/* Dispatch table for execbuf2 user extensions, indexed by extension name. */
static const i915_user_extension_fn execbuf_extensions[] = {
	[DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES] = parse_timeline_fences,
};

/*
 * Walk the user-extension chain hung off cliprects_ptr when
 * I915_EXEC_USE_EXTENSIONS is set, dispatching each entry through
 * execbuf_extensions[]. Returns 0 if extensions are absent or all
 * handled, negative errno otherwise.
 */
static int
parse_execbuf2_extensions(struct drm_i915_gem_execbuffer2 *args,
			  struct i915_execbuffer *eb)
{
	if (!(args->flags & I915_EXEC_USE_EXTENSIONS))
		return 0;

	/* The execbuf2 extension mechanism reuses cliprects_ptr. So we cannot
	 * have another flag also using it at the same time.
	 */
	if (eb->args->flags & I915_EXEC_FENCE_ARRAY)
		return -EINVAL;

	if (args->num_cliprects != 0)
		return -EINVAL;

	return i915_user_extensions(u64_to_user_ptr(args->cliprects_ptr),
				    execbuf_extensions,
				    ARRAY_SIZE(execbuf_extensions),
				    eb);
}
|
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
static void eb_requests_get(struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for_each_batch_create_order(eb, i) {
|
|
|
|
if (!eb->requests[i])
|
|
|
|
break;
|
|
|
|
|
|
|
|
i915_request_get(eb->requests[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void eb_requests_put(struct i915_execbuffer *eb)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for_each_batch_create_order(eb, i) {
|
|
|
|
if (!eb->requests[i])
|
|
|
|
break;
|
|
|
|
|
|
|
|
i915_request_put(eb->requests[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Build the single composite fence for a parallel (parent/child) submit:
 * a dma_fence_array wrapping the fence of every batch's request. The
 * composite is stashed in eb->composite_fence, and when @out_fence_fd is
 * requested a sync_file wrapping it is returned (NULL otherwise).
 * Returns an ERR_PTR on allocation failure.
 */
static struct sync_file *
eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd)
{
	struct sync_file *out_fence = NULL;
	struct dma_fence_array *fence_array;
	struct dma_fence **fences;
	unsigned int i;

	GEM_BUG_ON(!intel_context_is_parent(eb->context));

	fences = kmalloc_array(eb->num_batches, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);

	for_each_batch_create_order(eb, i) {
		fences[i] = &eb->requests[i]->fence;
		/* Mark each member as part of a composite fence. */
		__set_bit(I915_FENCE_FLAG_COMPOSITE,
			  &eb->requests[i]->fence.flags);
	}

	fence_array = dma_fence_array_create(eb->num_batches,
					     fences,
					     eb->context->parallel.fence_context,
					     eb->context->parallel.seqno++,
					     false);
	if (!fence_array) {
		kfree(fences);
		return ERR_PTR(-ENOMEM);
	}

	/* Move ownership to the dma_fence_array created above */
	for_each_batch_create_order(eb, i)
		dma_fence_get(fences[i]);

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&fence_array->base);
		/* sync_file now owns fence_array, drop creation ref */
		dma_fence_put(&fence_array->base);
		if (!out_fence)
			return ERR_PTR(-ENOMEM);
	}

	eb->composite_fence = &fence_array->base;

	return out_fence;
}
|
|
|
|
|
|
|
|
static struct sync_file *
|
|
|
|
eb_fences_add(struct i915_execbuffer *eb, struct i915_request *rq,
|
|
|
|
struct dma_fence *in_fence, int out_fence_fd)
|
|
|
|
{
|
|
|
|
struct sync_file *out_fence = NULL;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (unlikely(eb->gem_context->syncobj)) {
|
|
|
|
struct dma_fence *fence;
|
|
|
|
|
|
|
|
fence = drm_syncobj_fence_get(eb->gem_context->syncobj);
|
|
|
|
err = i915_request_await_dma_fence(rq, fence);
|
|
|
|
dma_fence_put(fence);
|
|
|
|
if (err)
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (in_fence) {
|
|
|
|
if (eb->args->flags & I915_EXEC_FENCE_SUBMIT)
|
|
|
|
err = i915_request_await_execution(rq, in_fence);
|
|
|
|
else
|
|
|
|
err = i915_request_await_dma_fence(rq, in_fence);
|
|
|
|
if (err < 0)
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (eb->fences) {
|
|
|
|
err = await_fence_array(eb, rq);
|
|
|
|
if (err)
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (intel_context_is_parallel(eb->context)) {
|
|
|
|
out_fence = eb_composite_fence_create(eb, out_fence_fd);
|
|
|
|
if (IS_ERR(out_fence))
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
} else if (out_fence_fd != -1) {
|
|
|
|
out_fence = sync_file_create(&rq->fence);
|
|
|
|
if (!out_fence)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
}
|
|
|
|
|
|
|
|
return out_fence;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct intel_context *
|
|
|
|
eb_find_context(struct i915_execbuffer *eb, unsigned int context_number)
|
|
|
|
{
|
|
|
|
struct intel_context *child;
|
|
|
|
|
|
|
|
if (likely(context_number == 0))
|
|
|
|
return eb->context;
|
|
|
|
|
|
|
|
for_each_child(eb->context, child)
|
|
|
|
if (!--context_number)
|
|
|
|
return child;
|
|
|
|
|
|
|
|
GEM_BUG_ON("Context not found");
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Allocate one i915_request per batch (parent first, then children via
 * eb_find_context()), wire up the in/out fences on the first-added request,
 * and attach per-request bookkeeping (batch vma resource, buffer-pool
 * activity). Returns the out-fence sync_file (possibly NULL) or an ERR_PTR;
 * on failure any requests already created are left in eb->requests for the
 * caller to unwind.
 */
static struct sync_file *
eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence,
		   int out_fence_fd)
{
	struct sync_file *out_fence = NULL;
	unsigned int i;

	for_each_batch_create_order(eb, i) {
		/* Allocate a request for this batch buffer nice and early. */
		eb->requests[i] = i915_request_create(eb_find_context(eb, i));
		if (IS_ERR(eb->requests[i])) {
			out_fence = ERR_CAST(eb->requests[i]);
			eb->requests[i] = NULL;
			return out_fence;
		}

		/*
		 * Only the first request added (committed to backend) has to
		 * take the in fences into account as all subsequent requests
		 * will have fences inserted inbetween them.
		 */
		if (i + 1 == eb->num_batches) {
			out_fence = eb_fences_add(eb, eb->requests[i],
						  in_fence, out_fence_fd);
			if (IS_ERR(out_fence))
				return out_fence;
		}

		/*
		 * Hold a reference to the batch's vma resource on the
		 * request. NOTE(review): presumably kept so the request can
		 * be inspected after submission (e.g. error capture) —
		 * confirm against i915_request::batch_res users.
		 */
		if (eb->batches[i]->vma)
			eb->requests[i]->batch_res =
				i915_vma_resource_get(eb->batches[i]->vma->resource);
		if (eb->batch_pool) {
			/* Shadow-batch pools are not used for parallel submits. */
			GEM_BUG_ON(intel_context_is_parallel(eb->context));
			intel_gt_buffer_pool_mark_active(eb->batch_pool,
							 eb->requests[i]);
		}
	}

	return out_fence;
}
|
|
|
|
|
2010-11-25 18:00:26 +00:00
|
|
|
static int
|
2017-06-15 08:14:33 +00:00
|
|
|
i915_gem_do_execbuffer(struct drm_device *dev,
|
2010-11-25 18:00:26 +00:00
|
|
|
struct drm_file *file,
|
|
|
|
struct drm_i915_gem_execbuffer2 *args,
|
2020-08-04 08:59:53 +00:00
|
|
|
struct drm_i915_gem_exec_object2 *exec)
|
2010-11-25 18:00:26 +00:00
|
|
|
{
|
2018-06-08 15:53:46 +00:00
|
|
|
struct drm_i915_private *i915 = to_i915(dev);
|
2017-06-15 08:14:33 +00:00
|
|
|
struct i915_execbuffer eb;
|
2017-01-27 09:40:08 +00:00
|
|
|
struct dma_fence *in_fence = NULL;
|
|
|
|
struct sync_file *out_fence = NULL;
|
|
|
|
int out_fence_fd = -1;
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
int err;
|
2010-11-25 19:32:06 +00:00
|
|
|
|
2017-09-21 11:01:35 +00:00
|
|
|
BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
|
|
|
|
~__EXEC_OBJECT_UNKNOWN_FLAGS);
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2018-06-08 15:53:46 +00:00
|
|
|
eb.i915 = i915;
|
2017-06-15 08:14:33 +00:00
|
|
|
eb.file = file;
|
|
|
|
eb.args = args;
|
2020-09-08 05:41:17 +00:00
|
|
|
if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
args->flags |= __EXEC_HAS_RELOC;
|
2017-08-16 08:52:06 +00:00
|
|
|
|
2017-06-15 08:14:33 +00:00
|
|
|
eb.exec = exec;
|
2020-08-19 14:08:44 +00:00
|
|
|
eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
|
|
|
|
eb.vma[0].vma = NULL;
|
2021-08-03 12:48:33 +00:00
|
|
|
eb.batch_pool = NULL;
|
2017-08-16 08:52:06 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
|
2017-06-15 08:14:33 +00:00
|
|
|
reloc_cache_init(&eb.reloc_cache, eb.i915);
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb.buffer_count = args->buffer_count;
|
2017-06-15 08:14:33 +00:00
|
|
|
eb.batch_start_offset = args->batch_start_offset;
|
2019-12-11 23:08:56 +00:00
|
|
|
eb.trampoline = NULL;
|
2017-06-15 08:14:33 +00:00
|
|
|
|
2020-08-04 08:59:53 +00:00
|
|
|
eb.fences = NULL;
|
2020-08-04 08:59:54 +00:00
|
|
|
eb.num_fences = 0;
|
2020-08-04 08:59:53 +00:00
|
|
|
|
2021-11-29 20:22:45 +00:00
|
|
|
eb_capture_list_clear(&eb);
|
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
memset(eb.requests, 0, sizeof(struct i915_request *) *
|
|
|
|
ARRAY_SIZE(eb.requests));
|
|
|
|
eb.composite_fence = NULL;
|
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb.batch_flags = 0;
|
2012-10-17 11:09:54 +00:00
|
|
|
if (args->flags & I915_EXEC_SECURE) {
|
2021-06-05 15:53:54 +00:00
|
|
|
if (GRAPHICS_VER(i915) >= 11)
|
2018-06-08 15:53:46 +00:00
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
/* Return -EPERM to trigger fallback code on old binaries. */
|
|
|
|
if (!HAS_SECURE_BATCHES(i915))
|
|
|
|
return -EPERM;
|
|
|
|
|
2016-06-21 08:54:20 +00:00
|
|
|
if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
|
2018-06-08 15:53:46 +00:00
|
|
|
return -EPERM;
|
2012-10-17 11:09:54 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb.batch_flags |= I915_DISPATCH_SECURE;
|
2012-10-17 11:09:54 +00:00
|
|
|
}
|
2012-12-17 15:21:27 +00:00
|
|
|
if (args->flags & I915_EXEC_IS_PINNED)
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb.batch_flags |= I915_DISPATCH_PINNED;
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
err = parse_execbuf2_extensions(args, &eb);
|
|
|
|
if (err)
|
|
|
|
goto err_ext;
|
|
|
|
|
|
|
|
err = add_fence_array(&eb);
|
|
|
|
if (err)
|
|
|
|
goto err_ext;
|
|
|
|
|
2020-05-13 18:09:37 +00:00
|
|
|
#define IN_FENCES (I915_EXEC_FENCE_IN | I915_EXEC_FENCE_SUBMIT)
|
|
|
|
if (args->flags & IN_FENCES) {
|
|
|
|
if ((args->flags & IN_FENCES) == IN_FENCES)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2017-01-27 09:40:08 +00:00
|
|
|
in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
|
2020-08-04 08:59:54 +00:00
|
|
|
if (!in_fence) {
|
|
|
|
err = -EINVAL;
|
|
|
|
goto err_ext;
|
|
|
|
}
|
2017-01-27 09:40:08 +00:00
|
|
|
}
|
2020-05-13 18:09:37 +00:00
|
|
|
#undef IN_FENCES
|
2019-05-21 21:11:34 +00:00
|
|
|
|
2017-01-27 09:40:08 +00:00
|
|
|
if (args->flags & I915_EXEC_FENCE_OUT) {
|
|
|
|
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
|
|
|
|
if (out_fence_fd < 0) {
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
err = out_fence_fd;
|
2020-05-13 18:09:37 +00:00
|
|
|
goto err_in_fence;
|
2017-01-27 09:40:08 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-04 08:59:53 +00:00
|
|
|
err = eb_create(&eb);
|
|
|
|
if (err)
|
2020-08-04 08:59:54 +00:00
|
|
|
goto err_out_fence;
|
2020-08-04 08:59:53 +00:00
|
|
|
|
2017-06-29 15:04:25 +00:00
|
|
|
GEM_BUG_ON(!eb.lut_size);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
|
2017-06-20 11:05:47 +00:00
|
|
|
err = eb_select_context(&eb);
|
|
|
|
if (unlikely(err))
|
|
|
|
goto err_destroy;
|
|
|
|
|
2020-08-19 14:08:52 +00:00
|
|
|
err = eb_select_engine(&eb);
|
2019-02-07 07:18:22 +00:00
|
|
|
if (unlikely(err))
|
2019-08-15 20:57:09 +00:00
|
|
|
goto err_context;
|
2019-02-07 07:18:22 +00:00
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
err = eb_lookup_vmas(&eb);
|
|
|
|
if (err) {
|
2021-06-10 14:35:25 +00:00
|
|
|
eb_release_vmas(&eb, true);
|
2020-08-19 14:08:48 +00:00
|
|
|
goto err_engine;
|
|
|
|
}
|
|
|
|
|
|
|
|
i915_gem_ww_ctx_init(&eb.ww, true);
|
|
|
|
|
2020-08-19 14:08:47 +00:00
|
|
|
err = eb_relocate_parse(&eb);
|
2017-07-21 14:50:36 +00:00
|
|
|
if (err) {
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
/*
|
|
|
|
* If the user expects the execobject.offset and
|
|
|
|
* reloc.presumed_offset to be an exact match,
|
|
|
|
* as for using NO_RELOC, then we cannot update
|
|
|
|
* the execobject.offset until we have completed
|
|
|
|
* relocation.
|
|
|
|
*/
|
|
|
|
args->flags &= ~__EXEC_HAS_RELOC;
|
|
|
|
goto err_vma;
|
2017-07-21 14:50:36 +00:00
|
|
|
}
|
2010-11-25 18:00:26 +00:00
|
|
|
|
2020-08-19 14:08:48 +00:00
|
|
|
ww_acquire_done(&eb.ww.ctx);
|
2022-06-29 17:43:45 +00:00
|
|
|
err = eb_capture_stage(&eb);
|
|
|
|
if (err)
|
|
|
|
goto err_vma;
|
2020-03-03 20:43:44 +00:00
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
out_fence = eb_requests_create(&eb, in_fence, out_fence_fd);
|
|
|
|
if (IS_ERR(out_fence)) {
|
|
|
|
err = PTR_ERR(out_fence);
|
2021-12-02 04:48:31 +00:00
|
|
|
out_fence = NULL;
|
2021-10-14 17:20:00 +00:00
|
|
|
if (eb.requests[0])
|
2017-08-15 14:57:33 +00:00
|
|
|
goto err_request;
|
2021-10-14 17:20:00 +00:00
|
|
|
else
|
|
|
|
goto err_vma;
|
2017-08-15 14:57:33 +00:00
|
|
|
}
|
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
err = eb_submit(&eb);
|
drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7.
Instead of doing what we do currently, which will never work with
PROVE_LOCKING, do the same as AMD does, and something similar to
relocation slowpath. When all locks are dropped, we acquire the
pages for pinning. When the locks are taken, we transfer those
pages in .get_pages() to the bo. As a final check before installing
the fences, we ensure that the mmu notifier was not called; if it is,
we return -EAGAIN to userspace to signal it has to start over.
Changes since v1:
- Unbinding is done in submit_init only. submit_begin() removed.
- MMU_NOTFIER -> MMU_NOTIFIER
Changes since v2:
- Make i915->mm.notifier a spinlock.
Changes since v3:
- Add WARN_ON if there are any page references left, should have been 0.
- Return 0 on success in submit_init(), bug from spinlock conversion.
- Release pvec outside of notifier_lock (Thomas).
Changes since v4:
- Mention why we're clearing eb->[i + 1].vma in the code. (Thomas)
- Actually check all invalidations in eb_move_to_gpu. (Thomas)
- Do not wait when process is exiting to fix gem_ctx_persistence.userptr.
Changes since v5:
- Clarify why check on PF_EXITING is (temporarily) required.
Changes since v6:
- Ensure userptr validity is checked in set_domain through a special path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
[danvet: s/kfree/kvfree/ in i915_gem_object_userptr_drop_ref in the
previous review round, but which got lost. The other open questions
around page refcount are imo better discussed in a separate series,
with amdgpu folks involved].
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-17-maarten.lankhorst@linux.intel.com
2021-03-23 15:50:05 +00:00
|
|
|
|
drm/i915: Late request cancellations are harmful
Conceptually, each request is a record of a hardware transaction - we
build up a list of pending commands and then either commit them to
hardware, or cancel them. However, whilst building up the list of
pending commands, we may modify state outside of the request and make
references to the pending request. If we do so and then cancel that
request, external objects then point to the deleted request leading to
both graphical and memory corruption.
The easiest example is to consider object/VMA tracking. When we mark an
object as active in a request, we store a pointer to this, the most
recent request, in the object. Then we want to free that object, we wait
for the most recent request to be idle before proceeding (otherwise the
hardware will write to pages now owned by the system, or we will attempt
to read from those pages before the hardware is finished writing). If
the request was cancelled instead, that wait completes immediately. As a
result, all requests must be committed and not cancelled if the external
state is unknown.
All that remains of i915_gem_request_cancel() users are just a couple of
extremely unlikely allocation failures, so remove the API entirely.
A consequence of committing all incomplete requests is that we generate
excess breadcrumbs and fill the ring much more often with dummy work. We
have completely undone the outstanding_last_seqno optimisation.
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=93907
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: stable@vger.kernel.org
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1460565315-7748-16-git-send-email-chris@chris-wilson.co.uk
2016-04-13 16:35:15 +00:00
|
|
|
err_request:
|
2021-10-14 17:20:00 +00:00
|
|
|
eb_requests_get(&eb);
|
|
|
|
err = eb_requests_add(&eb, err);
|
2017-03-02 12:25:25 +00:00
|
|
|
|
2020-08-04 08:59:54 +00:00
|
|
|
if (eb.fences)
|
2021-10-14 17:20:00 +00:00
|
|
|
signal_fence_array(&eb, eb.composite_fence ?
|
|
|
|
eb.composite_fence :
|
|
|
|
&eb.requests[0]->fence);
|
2017-08-15 14:57:33 +00:00
|
|
|
|
2017-01-27 09:40:08 +00:00
|
|
|
if (out_fence) {
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
if (err == 0) {
|
2017-01-27 09:40:08 +00:00
|
|
|
fd_install(out_fence_fd, out_fence->file);
|
2018-02-14 19:18:25 +00:00
|
|
|
args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
|
2017-01-27 09:40:08 +00:00
|
|
|
args->rsvd2 |= (u64)out_fence_fd << 32;
|
|
|
|
out_fence_fd = -1;
|
|
|
|
} else {
|
|
|
|
fput(out_fence->file);
|
|
|
|
}
|
|
|
|
}
|
2021-07-08 15:48:12 +00:00
|
|
|
|
|
|
|
if (unlikely(eb.gem_context->syncobj)) {
|
|
|
|
drm_syncobj_replace_fence(eb.gem_context->syncobj,
|
2021-10-14 17:20:00 +00:00
|
|
|
eb.composite_fence ?
|
|
|
|
eb.composite_fence :
|
|
|
|
&eb.requests[0]->fence);
|
2021-07-08 15:48:12 +00:00
|
|
|
}
|
|
|
|
|
2021-10-14 17:20:00 +00:00
|
|
|
if (!out_fence && eb.composite_fence)
|
|
|
|
dma_fence_put(eb.composite_fence);
|
|
|
|
|
|
|
|
eb_requests_put(&eb);
|
2010-11-25 18:00:26 +00:00
|
|
|
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we lookup up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this speed up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
err_vma:
|
2021-06-10 14:35:25 +00:00
|
|
|
eb_release_vmas(&eb, true);
|
2020-08-19 14:08:48 +00:00
|
|
|
WARN_ON(err == -EDEADLK);
|
|
|
|
i915_gem_ww_ctx_fini(&eb.ww);
|
|
|
|
|
|
|
|
if (eb.batch_pool)
|
|
|
|
intel_gt_buffer_pool_put(eb.batch_pool);
|
|
|
|
err_engine:
|
2020-08-19 14:08:52 +00:00
|
|
|
eb_put_engine(&eb);
|
2019-08-04 12:48:25 +00:00
|
|
|
err_context:
|
2019-04-25 05:01:43 +00:00
|
|
|
i915_gem_context_put(eb.gem_context);
|
2017-06-20 11:05:47 +00:00
|
|
|
err_destroy:
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we look up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this sped up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
eb_destroy(&eb);
|
2017-06-29 15:04:25 +00:00
|
|
|
err_out_fence:
|
2017-01-27 09:40:08 +00:00
|
|
|
if (out_fence_fd != -1)
|
|
|
|
put_unused_fd(out_fence_fd);
|
2017-02-03 22:45:29 +00:00
|
|
|
err_in_fence:
|
2017-01-27 09:40:08 +00:00
|
|
|
dma_fence_put(in_fence);
|
2020-08-04 08:59:54 +00:00
|
|
|
err_ext:
|
|
|
|
put_fence_array(eb.fences, eb.num_fences);
|
drm/i915: Eliminate lots of iterations over the execobjects array
The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.
Reservation is then split into phases. As we look up the VMA, we
try and bind it back into active location. Only if that fails, do we add
it to the unbound list for phase 2. In phase 2, we try and add all those
objects that could not fit into their previous location, with fallback
to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array. In the ideal case, we eliminate the separate reservation
phase). During the reservation phase, we only evict from the VM between
passes (rather than currently as we try to fit every new VMA). In
testing with Unreal Engine's Atlantis demo which stresses the eviction
logic on gen7 class hardware, this sped up the framerate by a factor of
2.
The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track active VMA as we perform the flushes and
synchronisation required.
The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2017-06-16 14:05:19 +00:00
|
|
|
return err;
|
2010-11-25 18:00:26 +00:00
|
|
|
}
|
|
|
|
|
2017-11-16 10:50:59 +00:00
|
|
|
static size_t eb_element_size(void)
|
|
|
|
{
|
2020-08-19 14:08:44 +00:00
|
|
|
return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
|
2017-11-16 10:50:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Sanity-check the user-supplied execobject count: it must be at least
 * one, and small enough that the combined allocation of exec objects and
 * eb_vma slots (plus the extra parser entries) cannot overflow size_t.
 */
static bool check_buffer_count(size_t count)
{
	const size_t sz = eb_element_size();

	/*
	 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
	 * array size (see eb_create()). Otherwise, we can accept an array as
	 * large as can be addressed (though use large arrays at your peril)!
	 */
	return count >= 1 && count <= INT_MAX && count <= SIZE_MAX / sz - 1;
}
|
|
|
|
|
2010-11-25 18:00:26 +00:00
|
|
|
/*
 * i915_gem_execbuffer2_ioctl - userspace entry point for EXECBUFFER2
 * @dev: drm device the ioctl was issued on
 * @data: ioctl payload, a struct drm_i915_gem_execbuffer2
 * @file: drm file (client) submitting the batch
 *
 * Validates the arguments, copies the execobject array in from
 * userspace, submits the batch via i915_gem_do_execbuffer() and then
 * writes any updated (relocated) object offsets back to the user's
 * array. Once execution has begun, copy-back faults are deliberately
 * ignored — see the comment below.
 *
 * Returns 0 on success or a negative error code.
 */
int
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list;
	const size_t count = args->buffer_count;
	int err;

	/* Reject counts that could overflow the kvmalloc_array() below. */
	if (!check_buffer_count(count)) {
		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	err = i915_gem_check_execbuffer(args);
	if (err)
		return err;

	/* Allocate extra slots for use by the command parser */
	exec2_list = kvmalloc_array(count + 2, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec2_list == NULL) {
		drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
			count);
		return -ENOMEM;
	}
	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec2_list) * count)) {
		drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count);
		kvfree(exec2_list);
		return -EFAULT;
	}

	err = i915_gem_do_execbuffer(dev, file, args, exec2_list);

	/*
	 * Now that we have begun execution of the batchbuffer, we ignore
	 * any new error after this point. Also given that we have already
	 * updated the associated relocations, we try to write out the current
	 * object locations irrespective of any error.
	 */
	if (args->flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		unsigned int i;

		/* Copy the new buffer offsets back to the user's exec list. */
		/*
		 * Note: count * sizeof(*user_exec_list) does not overflow,
		 * because we checked 'count' in check_buffer_count().
		 *
		 * And this range already got effectively checked earlier
		 * when we did the "copy_from_user()" above.
		 */
		if (!user_write_access_begin(user_exec_list,
					     count * sizeof(*user_exec_list)))
			goto end;

		for (i = 0; i < args->buffer_count; i++) {
			/* Only entries flagged UPDATE were moved by execbuf. */
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			unsafe_put_user(exec2_list[i].offset,
					&user_exec_list[i].offset,
					end_user);
		}
		/* unsafe_put_user() branches here on a write fault. */
end_user:
		user_write_access_end();
end:;
	}

	/* Mask out internal flags before args is copied back to userspace. */
	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
	kvfree(exec2_list);
	return err;
}
|