drm/i915: Use a single page table lock for each gtt.

We may create page table objects on the fly, but we may need to
wait on them with the ww lock held. Instead of waiting on a lock
that belongs to an already-freed object, give every page table
object in a vm the same reservation lock, which keeps -EDEADLK
handling working. This ensures that i915_vma_pin_ww can lock the
page tables when required.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-41-maarten.lankhorst@linux.intel.com
commit 26ad4f8b73 (parent 988d4ff6e3)
Maarten Lankhorst, 2021-03-23 16:50:29 +01:00; committed by Daniel Vetter
5 files changed, 56 insertions(+), 3 deletions(-)
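
The pattern this enables, sketched for illustration below (example_pin() is a hypothetical wrapper; the i915_gem_ww_* helpers, i915_vma_pin_ww() and PIN_USER are the driver's existing ones): a caller opens a ww transaction, i915_vma_pin_ww() can take the vm-wide page-table lock inside that transaction, and on -EDEADLK the caller backs off and retries instead of deadlocking on a lock that may belong to a freed object.

	/* Illustrative caller sketch only; example_pin() is hypothetical. */
	static int example_pin(struct i915_vma *vma)
	{
		struct i915_gem_ww_ctx ww;
		int err;

		i915_gem_ww_ctx_init(&ww, true);	/* interruptible ww transaction */
	retry:
		err = i915_gem_object_lock(vma->obj, &ww);
		if (!err)
			/* may take the vm's shared page-table lock internally */
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err == -EDEADLK) {
			/* drop all held ww locks, wait on the contended one, retry */
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		return err;
	}

One reservation object per vm is what makes the backoff step safe: after a backoff there is no per-page-table lock left dangling.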

drivers/gpu/drm/i915/gt/intel_ggtt.c:

@@ -647,7 +647,9 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
 	if (err)
 		goto err_ppgtt;
 
+	i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
 	err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+	i915_gem_object_unlock(ppgtt->vm.scratch[0]);
 	if (err)
 		goto err_stash;
 
@@ -734,6 +736,7 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
 	mutex_unlock(&ggtt->vm.mutex);
 
 	i915_address_space_fini(&ggtt->vm);
+	dma_resv_fini(&ggtt->vm.resv);
 
 	arch_phys_wc_del(ggtt->mtrr);
@@ -1115,6 +1118,7 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
 	ggtt->vm.gt = gt;
 	ggtt->vm.i915 = i915;
 	ggtt->vm.dma = i915->drm.dev;
+	dma_resv_init(&ggtt->vm.resv);
 
 	if (INTEL_GEN(i915) <= 5)
 		ret = i915_gmch_probe(ggtt);
@@ -1122,8 +1126,10 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
 		ret = gen6_gmch_probe(ggtt);
 	else
 		ret = gen8_gmch_probe(ggtt);
-	if (ret)
+	if (ret) {
+		dma_resv_fini(&ggtt->vm.resv);
 		return ret;
+	}
 
 	if ((ggtt->vm.total - 1) >> 32) {
 		drm_err(&i915->drm,
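
A note on the error path restored above: once dma_resv_init() has run, every exit path must call dma_resv_fini() exactly once before the containing structure goes away. A minimal sketch of that pairing, with hypothetical example_* names standing in for the real probe steps:

	/* Hypothetical sketch of the init/fini pairing ggtt_probe_hw() follows. */
	static int example_probe(struct i915_address_space *vm)
	{
		int ret;

		dma_resv_init(&vm->resv);	/* before any object can share it */
		ret = example_hw_setup(vm);	/* hypothetical hardware probe */
		if (ret) {
			dma_resv_fini(&vm->resv);	/* unwind on the failure path */
			return ret;
		}
		return 0;	/* on success, dma_resv_fini() runs at cleanup time */
	}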

drivers/gpu/drm/i915/gt/intel_gtt.c:

@@ -13,16 +13,36 @@
 struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
 {
+	struct drm_i915_gem_object *obj;
+
 	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
 		i915_gem_shrink_all(vm->i915);
 
-	return i915_gem_object_create_internal(vm->i915, sz);
+	obj = i915_gem_object_create_internal(vm->i915, sz);
+	/* ensure all dma objects have the same reservation class */
+	if (!IS_ERR(obj))
+		obj->base.resv = &vm->resv;
+	return obj;
 }
 
 int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
 {
 	int err;
 
+	i915_gem_object_lock(obj, NULL);
 	err = i915_gem_object_pin_pages(obj);
+	i915_gem_object_unlock(obj);
+	if (err)
+		return err;
+
+	i915_gem_object_make_unshrinkable(obj);
+	return 0;
+}
+
+int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
+{
+	int err;
+
+	err = i915_gem_object_pin_pages(obj);
 	if (err)
 		return err;
 
@@ -56,6 +76,20 @@ void __i915_vm_close(struct i915_address_space *vm)
 	mutex_unlock(&vm->mutex);
 }
 
+/* lock the vm into the current ww, if we lock one, we lock all */
+int i915_vm_lock_objects(struct i915_address_space *vm,
+			 struct i915_gem_ww_ctx *ww)
+{
+	if (vm->scratch[0]->base.resv == &vm->resv) {
+		return i915_gem_object_lock(vm->scratch[0], ww);
+	} else {
+		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+		/* We borrowed the scratch page from ggtt, take the top level object */
+		return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
+	}
+}
+
 void i915_address_space_fini(struct i915_address_space *vm)
 {
 	drm_mm_takedown(&vm->mm);
@@ -69,6 +103,7 @@ static void __i915_vm_release(struct work_struct *work)
 
 	vm->cleanup(vm);
 	i915_address_space_fini(vm);
+	dma_resv_fini(&vm->resv);
 
 	kfree(vm);
 }
@@ -98,6 +133,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
 	mutex_init(&vm->mutex);
 	lockdep_set_subclass(&vm->mutex, subclass);
 	i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
+	dma_resv_init(&vm->resv);
 
 	GEM_BUG_ON(!vm->total);
 	drm_mm_init(&vm->mm, 0, vm->total);
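
Because every page-table object of a vm now shares &vm->resv, locking any one of them locks them all, which is exactly what i915_vm_lock_objects() above exploits. A hedged sketch of a caller (example_lock_and_bind() is hypothetical):

	static int example_lock_and_bind(struct i915_address_space *vm,
					 struct i915_gem_ww_ctx *ww)
	{
		int err;

		/* one ww lock covers every page-table object in this vm */
		err = i915_vm_lock_objects(vm, ww);
		if (err)
			return err;	/* -EDEADLK flows back to the ww backoff loop */

		/* ... safe to allocate/pin page tables and write PTEs here ... */
		return 0;
	}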

drivers/gpu/drm/i915/gt/intel_gtt.h:

@@ -238,6 +238,7 @@ struct i915_address_space {
 	atomic_t open;
 
 	struct mutex mutex; /* protects vma and our lists */
+	struct dma_resv resv; /* reservation lock for all pd objects, and buffer pool */
 
 #define VM_CLASS_GGTT 0
 #define VM_CLASS_PPGTT 1
@@ -346,6 +347,9 @@ struct i915_ppgtt {
 
 #define i915_is_ggtt(vm) ((vm)->is_ggtt)
 
+int __must_check
+i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);
+
 static inline bool
 i915_vm_is_4lvl(const struct i915_address_space *vm)
 {
@@ -522,6 +526,7 @@ struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
 struct i915_page_directory *__alloc_pd(int npde);
 
 int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
+int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
 
 void free_px(struct i915_address_space *vm,
 	     struct i915_page_table *pt, int lvl);
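
The two pin declarations above differ only in locking context: pin_pt_dma() takes and drops the object lock itself around pinning, while pin_pt_dma_locked() expects the caller to already hold the vm's shared reservation lock. A hypothetical helper illustrating the split:

	/* Hypothetical: choose the pin variant based on the locking context. */
	static int example_pin_pt(struct i915_address_space *vm,
				  struct drm_i915_gem_object *pt_obj,
				  struct i915_gem_ww_ctx *ww)
	{
		if (!ww)
			return pin_pt_dma(vm, pt_obj);	/* locks internally */

		/* caller's ww transaction already holds the shared resv,
		 * e.g. via i915_vm_lock_objects() */
		return pin_pt_dma_locked(vm, pt_obj);
	}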

drivers/gpu/drm/i915/gt/intel_ppgtt.c:

@@ -262,7 +262,7 @@ int i915_vm_pin_pt_stash(struct i915_address_space *vm,
 	for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
 		for (pt = stash->pt[n]; pt; pt = pt->stash) {
-			err = pin_pt_dma(vm, pt->base);
+			err = pin_pt_dma_locked(vm, pt->base);
 			if (err)
 				return err;
 		}
 	}
@@ -304,6 +304,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
 	ppgtt->vm.dma = i915->drm.dev;
 	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
 
+	dma_resv_init(&ppgtt->vm.resv);
 	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
 
 	ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
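
Since i915_vm_pin_pt_stash() now pins via pin_pt_dma_locked(), its callers must hold the vm's shared object lock, exactly as the init_aliasing_ppgtt() hunk earlier does. A condensed sketch of that calling convention (example_pin_stash() is hypothetical):

	static int example_pin_stash(struct i915_address_space *vm,
				     struct i915_vm_pt_stash *stash)
	{
		int err;

		/* scratch[0] shares &vm->resv, so this locks all page tables */
		i915_gem_object_lock(vm->scratch[0], NULL);
		err = i915_vm_pin_pt_stash(vm, stash);
		i915_gem_object_unlock(vm->scratch[0]);

		return err;
	}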

drivers/gpu/drm/i915/i915_vma.c:

@@ -884,6 +884,11 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
 
 	if (flags & vma->vm->bind_async_flags) {
+		/* lock VM */
+		err = i915_vm_lock_objects(vma->vm, ww);
+		if (err)
+			goto err_rpm;
+
 		work = i915_vma_work();
 		if (!work) {
 			err = -ENOMEM;