drm/radeon: use mmu_interval_notifier_insert

The new API is an exact match for the needs of radeon.

For some reason radeon tries to remove overlapping ranges from the
interval tree, but interval trees (and mmu_interval_notifier_insert())
support overlapping ranges directly. Simply delete all this code.
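
As an aside, a minimal illustration of that point (a sketch only, not
code from this patch; "a" and "b" are two hypothetical userptr objects
sharing the same notifier ops):

  /* both inserts succeed even though the two ranges overlap */
  ret = mmu_interval_notifier_insert(&a->notifier, current->mm,
                                     addr, size, &my_ops);
  if (!ret)
          ret = mmu_interval_notifier_insert(&b->notifier, current->mm,
                                             addr + size / 2, size,
                                             &my_ops);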

Since this driver is missing an invalidate_range_end callback, but
still calls get_user_pages(), it cannot be correct against all races.
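
For reference, the read side of that scheme would look roughly like the
sketch below. This is not radeon code: struct my_userptr,
my_userptr_get_pages() and the mutex are made up for illustration, and
the mmap_sem calls reflect the mm locking API of the kernels this series
is based on (mmap_read_lock()/mmap_read_unlock() on later kernels):

  #include <linux/mm.h>
  #include <linux/mmu_notifier.h>
  #include <linux/mutex.h>
  #include <linux/sched.h>

  struct my_userptr {
          struct mmu_interval_notifier notifier;
          struct mutex lock;      /* also taken by the invalidate callback */
          unsigned long start;
  };

  static int my_userptr_get_pages(struct my_userptr *p, struct page **pages,
                                  unsigned long npages)
  {
          unsigned long seq;
          long pinned;

  retry:
          seq = mmu_interval_read_begin(&p->notifier);

          down_read(&current->mm->mmap_sem);
          pinned = get_user_pages(p->start, npages, FOLL_WRITE, pages, NULL);
          up_read(&current->mm->mmap_sem);
          if (pinned < 0)
                  return pinned;
          if (pinned != npages) {
                  while (pinned > 0)
                          put_page(pages[--pinned]);
                  return -EFAULT;
          }

          /*
           * The pages are only usable if no invalidation ran since
           * mmu_interval_read_begin(); the check must be made under the
           * same lock the invalidate callback holds around
           * mmu_interval_set_seq().
           */
          mutex_lock(&p->lock);
          if (mmu_interval_read_retry(&p->notifier, seq)) {
                  mutex_unlock(&p->lock);
                  while (pinned > 0)
                          put_page(pages[--pinned]);
                  goto retry;
          }
          /* ...set up the device mapping while p->lock is held... */
          mutex_unlock(&p->lock);
          return 0;
  }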

Link: https://lore.kernel.org/r/20191112202231.3856-8-jgg@ziepe.ca
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>

drivers/gpu/drm/radeon/radeon.h

@@ -68,6 +68,10 @@
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
@@ -509,8 +513,9 @@ struct radeon_bo {
struct ttm_bo_kmap_obj dma_buf_vmap;
pid_t pid;
struct radeon_mn *mn;
struct list_head mn_list;
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_interval_notifier notifier;
#endif
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base)

drivers/gpu/drm/radeon/radeon_mn.c

@@ -36,131 +36,51 @@
#include "radeon.h"
struct radeon_mn {
struct mmu_notifier mn;
/* objects protected by lock */
struct mutex lock;
struct rb_root_cached objects;
};
struct radeon_mn_node {
struct interval_tree_node it;
struct list_head bos;
};
/**
* radeon_mn_invalidate_range_start - callback to notify about mm change
* radeon_mn_invalidate - callback to notify about mm change
*
* @mn: our notifier
* @mn: the mm this callback is about
* @start: start of updated range
* @end: end of updated range
* @range: the VMA under invalidation
*
* We block for all BOs between start and end to be idle and
* unmap them by move them into system domain again.
*/
static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
const struct mmu_notifier_range *range)
static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{
struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
struct radeon_bo *bo = container_of(mn, struct radeon_bo, notifier);
struct ttm_operation_ctx ctx = { false, false };
struct interval_tree_node *it;
unsigned long end;
int ret = 0;
long r;
/* notification is exclusive, but interval is inclusive */
end = range->end - 1;
if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
return true;
/* TODO we should be able to split locking for interval tree and
* the tear down.
*/
if (mmu_notifier_range_blockable(range))
mutex_lock(&rmn->lock);
else if (!mutex_trylock(&rmn->lock))
return -EAGAIN;
if (!mmu_notifier_range_blockable(range))
return false;
it = interval_tree_iter_first(&rmn->objects, range->start, end);
while (it) {
struct radeon_mn_node *node;
struct radeon_bo *bo;
long r;
if (!mmu_notifier_range_blockable(range)) {
ret = -EAGAIN;
goto out_unlock;
}
node = container_of(it, struct radeon_mn_node, it);
it = interval_tree_iter_next(it, range->start, end);
list_for_each_entry(bo, &node->bos, mn_list) {
if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
continue;
r = radeon_bo_reserve(bo, true);
if (r) {
DRM_ERROR("(%ld) failed to reserve user bo\n", r);
continue;
}
r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
true, false, MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
DRM_ERROR("(%ld) failed to validate user bo\n", r);
radeon_bo_unreserve(bo);
}
r = radeon_bo_reserve(bo, true);
if (r) {
DRM_ERROR("(%ld) failed to reserve user bo\n", r);
return true;
}
out_unlock:
mutex_unlock(&rmn->lock);
return ret;
r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
DRM_ERROR("(%ld) failed to validate user bo\n", r);
radeon_bo_unreserve(bo);
return true;
}
static void radeon_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
struct mmu_notifier_range range = {
.mm = mm,
.start = 0,
.end = ULONG_MAX,
.flags = 0,
.event = MMU_NOTIFY_UNMAP,
};
radeon_mn_invalidate_range_start(mn, &range);
}
static struct mmu_notifier *radeon_mn_alloc_notifier(struct mm_struct *mm)
{
struct radeon_mn *rmn;
rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
if (!rmn)
return ERR_PTR(-ENOMEM);
mutex_init(&rmn->lock);
rmn->objects = RB_ROOT_CACHED;
return &rmn->mn;
}
static void radeon_mn_free_notifier(struct mmu_notifier *mn)
{
kfree(container_of(mn, struct radeon_mn, mn));
}
static const struct mmu_notifier_ops radeon_mn_ops = {
.release = radeon_mn_release,
.invalidate_range_start = radeon_mn_invalidate_range_start,
.alloc_notifier = radeon_mn_alloc_notifier,
.free_notifier = radeon_mn_free_notifier,
static const struct mmu_interval_notifier_ops radeon_mn_ops = {
.invalidate = radeon_mn_invalidate,
};
/**
@@ -174,51 +94,20 @@ static const struct mmu_notifier_ops radeon_mn_ops = {
*/
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
{
unsigned long end = addr + radeon_bo_size(bo) - 1;
struct mmu_notifier *mn;
struct radeon_mn *rmn;
struct radeon_mn_node *node = NULL;
struct list_head bos;
struct interval_tree_node *it;
int ret;
mn = mmu_notifier_get(&radeon_mn_ops, current->mm);
if (IS_ERR(mn))
return PTR_ERR(mn);
rmn = container_of(mn, struct radeon_mn, mn);
INIT_LIST_HEAD(&bos);
mutex_lock(&rmn->lock);
while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
kfree(node);
node = container_of(it, struct radeon_mn_node, it);
interval_tree_remove(&node->it, &rmn->objects);
addr = min(it->start, addr);
end = max(it->last, end);
list_splice(&node->bos, &bos);
}
if (!node) {
node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
if (!node) {
mutex_unlock(&rmn->lock);
return -ENOMEM;
}
}
bo->mn = rmn;
node->it.start = addr;
node->it.last = end;
INIT_LIST_HEAD(&node->bos);
list_splice(&bos, &node->bos);
list_add(&bo->mn_list, &node->bos);
interval_tree_insert(&node->it, &rmn->objects);
mutex_unlock(&rmn->lock);
ret = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
radeon_bo_size(bo), &radeon_mn_ops);
if (ret)
return ret;
/*
* FIXME: radeon appears to allow get_user_pages to run during
* invalidate_range_start/end, which is not a safe way to read the
* PTEs. It should use the mmu_interval_read_begin() scheme around the
* get_user_pages to ensure that the PTEs are read properly
*/
mmu_interval_read_begin(&bo->notifier);
return 0;
}
@@ -231,27 +120,8 @@ int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
*/
void radeon_mn_unregister(struct radeon_bo *bo)
{
struct radeon_mn *rmn = bo->mn;
struct list_head *head;
if (!rmn)
if (!bo->notifier.mm)
return;
mutex_lock(&rmn->lock);
/* save the next list entry for later */
head = bo->mn_list.next;
list_del(&bo->mn_list);
if (list_empty(head)) {
struct radeon_mn_node *node;
node = container_of(head, struct radeon_mn_node, bos);
interval_tree_remove(&node->it, &rmn->objects);
kfree(node);
}
mutex_unlock(&rmn->lock);
mmu_notifier_put(&rmn->mn);
bo->mn = NULL;
mmu_interval_notifier_remove(&bo->notifier);
bo->notifier.mm = NULL;
}
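
(Not part of the patch: to make the FIXME in radeon_mn_register() above
concrete, this is a minimal sketch of the invalidate side that would
pair with the mmu_interval_read_begin()/mmu_interval_read_retry()
reader, reusing the hypothetical struct my_userptr from the sketch in
the commit message.)

  static bool my_userptr_invalidate(struct mmu_interval_notifier *mni,
                                    const struct mmu_notifier_range *range,
                                    unsigned long cur_seq)
  {
          struct my_userptr *p =
                  container_of(mni, struct my_userptr, notifier);

          if (!mmu_notifier_range_blockable(range))
                  return false;

          mutex_lock(&p->lock);
          /*
           * Publishing cur_seq under the same lock the reader holds for
           * mmu_interval_read_retry() is what makes a racing
           * get_user_pages() result detectable and retried.
           */
          mmu_interval_set_seq(mni, cur_seq);
          /* ...wait for the device to finish with the old pages... */
          mutex_unlock(&p->lock);
          return true;
  }

  static const struct mmu_interval_notifier_ops my_userptr_ops = {
          .invalidate = my_userptr_invalidate,
  };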