mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-30 22:26:55 +00:00
drm/i915: Prevent use-after-free in invalidate_range_start callback
It's possible for the invalidate_range_start mmu notifier callback to race against userptr object release. If the gem object was released prior to obtaining the spinlock in invalidate_range_start, we hit a null pointer dereference. Testcase: igt/gem_userptr_blits/stress-mm-invalidate-close Testcase: igt/gem_userptr_blits/stress-mm-invalidate-close-overlap Cc: Chris Wilson <chris@chris-wilson.co.uk> Signed-off-by: Michał Winiarski <michal.winiarski@intel.com> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: stable@vger.kernel.org [Jani: added code comment suggested by Chris] Signed-off-by: Jani Nikula <jani.nikula@intel.com>
This commit is contained in:
parent
1293eaa3eb
commit
460822b0b1
1 changed file with 18 additions and 2 deletions
|
@ -113,7 +113,10 @@ static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
obj = mo->obj;
|
obj = mo->obj;
|
||||||
drm_gem_object_reference(&obj->base);
|
|
||||||
|
if (!kref_get_unless_zero(&obj->base.refcount))
|
||||||
|
continue;
|
||||||
|
|
||||||
spin_unlock(&mn->lock);
|
spin_unlock(&mn->lock);
|
||||||
|
|
||||||
cancel_userptr(obj);
|
cancel_userptr(obj);
|
||||||
|
@ -149,7 +152,20 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
|
||||||
it = interval_tree_iter_first(&mn->objects, start, end);
|
it = interval_tree_iter_first(&mn->objects, start, end);
|
||||||
if (it != NULL) {
|
if (it != NULL) {
|
||||||
obj = container_of(it, struct i915_mmu_object, it)->obj;
|
obj = container_of(it, struct i915_mmu_object, it)->obj;
|
||||||
drm_gem_object_reference(&obj->base);
|
|
||||||
|
/* The mmu_object is released late when destroying the
|
||||||
|
* GEM object so it is entirely possible to gain a
|
||||||
|
* reference on an object in the process of being freed
|
||||||
|
* since our serialisation is via the spinlock and not
|
||||||
|
* the struct_mutex - and consequently use it after it
|
||||||
|
* is freed and then double free it.
|
||||||
|
*/
|
||||||
|
if (!kref_get_unless_zero(&obj->base.refcount)) {
|
||||||
|
spin_unlock(&mn->lock);
|
||||||
|
serial = 0;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
serial = mn->serial;
|
serial = mn->serial;
|
||||||
}
|
}
|
||||||
spin_unlock(&mn->lock);
|
spin_unlock(&mn->lock);
|
||||||
|
|
Loading…
Reference in a new issue