drm/nouveau/prime: Extend DMA reservation object lock

Prior to commit 019cbd4a4f ("drm/nouveau: Initialize GEM object before
TTM object"), the reservation object was kept locked across the entire
buffer object creation.

After splitting nouveau_bo_new() into separate nouveau_bo_alloc() and
nouveau_bo_init() functions, the reservation object is passed to the
latter, so the lock needs to be held across that function as well.

Fixes: 019cbd4a4f ("drm/nouveau: Initialize GEM object before TTM object")
Signed-off-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
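
For orientation, below is a condensed view of nouveau_gem_prime_import_sg_table()
as it looks after this patch, stitched together from the hunks that follow. Local
variable declarations and the placement setup are elided with /* ... */ comments,
so treat it as an illustrative sketch rather than the literal file contents:

    struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
                                                              struct dma_buf_attachment *attach,
                                                              struct sg_table *sg)
    {
            struct nouveau_drm *drm = nouveau_drm(dev);
            struct drm_gem_object *obj;
            struct nouveau_bo *nvbo;
            struct dma_resv *robj = attach->dmabuf->resv;
            u64 size = attach->dmabuf->size;
            /* ... remaining declarations elided ... */

            /* The reservation lock is now held across both allocation and
             * initialization of the buffer object. */
            dma_resv_lock(robj, NULL);

            nvbo = nouveau_bo_alloc(&drm->client, &size, &align, flags, 0, 0);
            if (IS_ERR(nvbo)) {
                    obj = ERR_CAST(nvbo);
                    goto unlock;
            }

            /* ... valid_domains / placement setup elided ... */

            ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
            if (ret) {
                    nouveau_bo_ref(NULL, &nvbo);
                    obj = ERR_PTR(-ENOMEM);
                    goto unlock;
            }

            ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
            if (ret) {
                    nouveau_bo_ref(NULL, &nvbo);
                    obj = ERR_PTR(ret);
                    goto unlock;
            }

            obj = &nvbo->bo.base;

    unlock:
            /* Every path, success or error, drops the lock exactly once. */
            dma_resv_unlock(robj);
            return obj;
    }

The key point is that dma_resv_unlock() now sits behind a single unlock label,
so the lock taken before nouveau_bo_alloc() stays held through nouveau_bo_init()
and is released exactly once on every exit path.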

--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -60,6 +60,7 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 					     struct sg_table *sg)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct drm_gem_object *obj;
 	struct nouveau_bo *nvbo;
 	struct dma_resv *robj = attach->dmabuf->resv;
 	u64 size = attach->dmabuf->size;
@@ -71,9 +72,10 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 
 	dma_resv_lock(robj, NULL);
 	nvbo = nouveau_bo_alloc(&drm->client, &size, &align, flags, 0, 0);
-	dma_resv_unlock(robj);
-	if (IS_ERR(nvbo))
-		return ERR_CAST(nvbo);
+	if (IS_ERR(nvbo)) {
+		obj = ERR_CAST(nvbo);
+		goto unlock;
+	}
 
 	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
 
@@ -82,16 +84,22 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 	ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
 	if (ret) {
 		nouveau_bo_ref(NULL, &nvbo);
-		return ERR_PTR(-ENOMEM);
+		obj = ERR_PTR(-ENOMEM);
+		goto unlock;
 	}
 
 	ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
 	if (ret) {
 		nouveau_bo_ref(NULL, &nvbo);
-		return ERR_PTR(ret);
+		obj = ERR_PTR(ret);
+		goto unlock;
 	}
 
-	return &nvbo->bo.base;
+	obj = &nvbo->bo.base;
+
+unlock:
+	dma_resv_unlock(robj);
+	return obj;
 }
 
 int nouveau_gem_prime_pin(struct drm_gem_object *obj)