drm/i915: Invalidate the TLBs on each GT

With multi-GT devices, the object may have been bound on each GT.
Invalidate the TLBs across all GT before releasing the pages
back to the system.

Signed-off-by: Chris Wilson <chris.p.wilson@linux.intel.com>
Cc: Fei Yang <fei.yang@intel.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230801141955.383305-4-andi.shyti@linux.intel.com
This commit is contained in:
Chris Wilson 2023-08-01 16:19:54 +02:00 committed by Andi Shyti
parent a79d48846b
commit d6c531ab48
4 changed files with 24 additions and 11 deletions

View file

@@ -17,6 +17,8 @@
#include "i915_selftest.h"
#include "i915_vma_resource.h"
#include "gt/intel_gt_defines.h"
struct drm_i915_gem_object;
struct intel_fronbuffer;
struct intel_memory_region;
@@ -675,7 +677,7 @@ struct drm_i915_gem_object {
*/
bool dirty:1;
u32 tlb;
u32 tlb[I915_MAX_GT];
} mm;
struct {

View file

@@ -193,13 +193,16 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
/*
 * Flush any pending TLB invalidations recorded against this object.
 *
 * With multi-GT devices the object may have been bound on each GT, so walk
 * every GT and, for each one that has a recorded invalidation seqno in
 * obj->mm.tlb[], perform the full TLB invalidation before the pages are
 * released back to the system.
 */
static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_gt *gt;
	int id;

	for_each_gt(gt, i915, id) {
		if (!obj->mm.tlb[id])
			continue; /* not 'return': must not skip the remaining GTs */

		intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]);
		obj->mm.tlb[id] = 0;
	}
}
struct sg_table *

View file

@@ -8,6 +8,7 @@
#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
#include "intel_gt.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
#include "gen8_ppgtt.h"
@@ -210,8 +211,7 @@ void ppgtt_unbind_vma(struct i915_address_space *vm,
return;
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
if (vma_res->tlb)
vma_invalidate_tlb(vm, vma_res->tlb);
vma_invalidate_tlb(vm, vma_res->tlb);
}
static unsigned long pd_count(u64 size, int shift)

View file

@@ -1340,6 +1340,12 @@ I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
/**
 * vma_invalidate_tlb - record a pending full TLB invalidation for each GT
 * @vm: address space the vma was bound into
 * @tlb: per-GT array of invalidation seqnos (I915_MAX_GT entries); may be NULL,
 *       in which case no invalidation is recorded
 *
 * Before we release the pages that were bound by this vma, we
 * must invalidate all the TLBs that may still have a reference
 * back to our physical address. It only needs to be done once,
 * so after updating the PTE to point away from the pages, record
 * the most recent TLB invalidation seqno, and if we have not yet
 * flushed the TLBs upon release, perform a full invalidation.
 */
void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
{
	struct intel_gt *gt;
	int id;

	if (!tlb)
		return;

	for_each_gt(gt, vm->i915, id)
		WRITE_ONCE(tlb[id],
			   /*
			    * Use the per-iteration gt, not vm->gt: each slot
			    * must hold the seqno of its own GT so that
			    * flush_tlb_invalidate() compares it against the
			    * matching GT's invalidation counter.
			    */
			   intel_gt_next_invalidate_tlb_full(gt));
}
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
@@ -1993,7 +2001,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
if (async)
unbind_fence = i915_vma_resource_unbind(vma_res,
&vma->obj->mm.tlb);
vma->obj->mm.tlb);
else
unbind_fence = i915_vma_resource_unbind(vma_res, NULL);
@@ -2010,7 +2018,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
dma_fence_put(unbind_fence);
unbind_fence = NULL;
}
vma_invalidate_tlb(vma->vm, &vma->obj->mm.tlb);
vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
}
/*